// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
/* Persistent alloc info: */
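/*
 * A bucket's allocation information has gone through several on-disk
 * formats: v1 (struct bch_alloc) stores a bitmap of present fields followed
 * by packed little-endian values, v2 and v3 store varint-encoded fields, and
 * v4 (struct bch_alloc_v4) is the current fixed-layout version.  The helpers
 * below decode the older formats into struct bkey_alloc_unpacked; the rest
 * of this file works in terms of v4.
 */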
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};
struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef  x
};
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}
static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
				      unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1:
		*((u8 *) *p) = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	case 8:
		*((__le64 *) *p) = cpu_to_le64(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}
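/*
 * A v1 alloc value is the field bitmap followed by each present field as a
 * little-endian integer of BCH_ALLOC_V1_FIELD_BYTES[] width; the get/put
 * helpers above decode/encode one field at a time while advancing *p past it.
 */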
static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}
static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}
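/*
 * The unpacked form is only needed while reading old keys: any update
 * rewrites the key as alloc_v4 (see bch2_alloc_to_v4_mut() below), so the
 * older key types are effectively read-only.
 */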
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  unsigned flags, struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
		prt_printf(err, "incorrect value size (%zu < %u)",
		       bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}
int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  unsigned flags, struct printbuf *err)
{
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v2(&u, k)) {
		prt_printf(err, "unpack error");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}
int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  unsigned flags, struct printbuf *err)
{
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v3(&u, k)) {
		prt_printf(err, "unpack error");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}
int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  unsigned flags, struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int rw = flags & WRITE;

	if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
		prt_printf(err, "bad val size (%lu != %u)",
		       bkey_val_u64s(k.k), alloc_v4_u64s(a.v));
		return -BCH_ERR_invalid_bkey;
	}

	if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
	    BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
		prt_printf(err, "invalid backpointers_start");
		return -BCH_ERR_invalid_bkey;
	}

	if (rw == WRITE &&
	    !(flags & BKEY_INVALID_FROM_JOURNAL) &&
	    test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
		unsigned i, bp_len = 0;

		for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
			bp_len += alloc_v4_backpointers_c(a.v)[i].bucket_len;

		if (bp_len > a.v->dirty_sectors) {
			prt_printf(err, "too many backpointers");
			return -BCH_ERR_invalid_bkey;
		}
	}

	if (rw == WRITE) {
		if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
			prt_printf(err, "invalid data type (got %u should be %u)",
			       a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
			return -BCH_ERR_invalid_bkey;
		}

		switch (a.v->data_type) {
		case BCH_DATA_free:
		case BCH_DATA_need_gc_gens:
		case BCH_DATA_need_discard:
			if (a.v->dirty_sectors ||
			    a.v->cached_sectors ||
			    a.v->stripe) {
				prt_printf(err, "empty data type free but have data");
				return -BCH_ERR_invalid_bkey;
			}
			break;
		case BCH_DATA_sb:
		case BCH_DATA_journal:
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			if (!a.v->dirty_sectors) {
				prt_printf(err, "data_type %s but dirty_sectors==0",
				       bch2_data_types[a.v->data_type]);
				return -BCH_ERR_invalid_bkey;
			}
			break;
		case BCH_DATA_cached:
			if (!a.v->cached_sectors ||
			    a.v->dirty_sectors ||
			    a.v->stripe) {
				prt_printf(err, "data type inconsistency");
				return -BCH_ERR_invalid_bkey;
			}

			if (!a.v->io_time[READ] &&
			    test_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags)) {
				prt_printf(err, "cached bucket with read_time == 0");
				return -BCH_ERR_invalid_bkey;
			}
			break;
		case BCH_DATA_stripe:
			if (!a.v->stripe) {
				prt_printf(err, "data_type %s but stripe==0",
				       bch2_data_types[a.v->data_type]);
				return -BCH_ERR_invalid_bkey;
			}
			break;
		}
	}

	return 0;
}
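/*
 * Byteswap a 40-bit integer held in the low five bytes of a u64: the outer
 * byte pairs trade places around the fixed middle byte.  Worked example:
 * swab40(0x1122334455) == 0x5544332211.  This is needed because struct
 * bch_backpointer stores bucket_offset as a 40-bit bitfield, which the
 * standard swab64() can't handle.
 */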
static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}
void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset	= swab40(bp->bucket_offset);
		bp->bucket_len		= swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
	unsigned i;

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type %s",
	       a->gen, a->oldest_gen,
	       a->data_type < BCH_DATA_NR
	       ? bch2_data_types[a->data_type]
	       : "(invalid data type)");
	prt_newline(out);
	prt_printf(out, "journal_seq %llu", a->journal_seq);
	prt_newline(out);
	prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_newline(out);
	prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_newline(out);
	prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
	prt_newline(out);
	prt_printf(out, "cached_sectors %u", a->cached_sectors);
	prt_newline(out);
	prt_printf(out, "stripe %u", a->stripe);
	prt_newline(out);
	prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
	prt_newline(out);
	prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
	prt_newline(out);
	prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
	prt_newline(out);
	prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
	prt_newline(out);
	prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	prt_newline(out);

	if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
		struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
		const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);

		prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
		printbuf_indent_add(out, 2);

		for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
			prt_newline(out);
			bch2_backpointer_to_text(out, &bps[i]);
		}

		printbuf_indent_sub(out, 2);
	}

	printbuf_indent_sub(out, 2);
}
void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}
static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}
static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_BACKPOINTERS_START(a.v) == BCH_ALLOC_V4_U64s &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0)) {
		/*
		 * Reserve space for one more backpointer here:
		 * Not sketchy at doing it this way, nope...
		 */
		struct bkey_i_alloc_v4 *ret =
			bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
		if (!IS_ERR(ret))
			bkey_reassemble(&ret->k_i, k);
		return ret;
	}
	return __bch2_alloc_to_v4_mut(trans, k);
}
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
			      struct bpos pos)
{
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	int ret;

	bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_WITH_UPDATES|
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (unlikely(ret))
		goto err;

	a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}
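/*
 * Sketch of the intended usage (bch2_bucket_io_time_reset() below is a real
 * example): get a mutable v4 copy of the alloc key, modify it, then update
 * and commit within the same transaction:
 *
 *	a = bch2_trans_start_alloc_update(trans, &iter, pos);
 *	ret = PTR_ERR_OR_ZERO(a);
 *	if (!ret) {
 *		a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
 *		ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
 *		      bch2_trans_commit(trans, NULL, NULL, 0);
 *		bch2_trans_iter_exit(trans, &iter);
 *	}
 */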
int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	struct bch_dev *ca;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		ca = bch_dev_bkey_exists(c, k.k->p.inode);

		*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));

	return ret;
}
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}
static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}
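/*
 * Worked example, assuming KEY_TYPE_BUCKET_GENS_BITS == 8 so that one
 * bucket_gens key covers 256 buckets: alloc position 10:1000 maps to
 * bucket_gens position 10:3 (1000 >> 8) at offset 232 (1000 & 0xff);
 * bucket_gens_pos_to_alloc() is the inverse mapping.
 */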
static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}
int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     unsigned flags, struct printbuf *err)
{
	if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
		prt_printf(err, "bad val size (%lu != %zu)",
		       bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}
void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}
int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	unsigned offset;
	struct bpos pos;
	u8 gen;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		gen = bch2_alloc_to_v4(k, &a)->gen;
		pos = alloc_gens_pos(iter.pos, &offset);

		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
			ret = commit_do(&trans, NULL, NULL,
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_LAZY_RW,
				__bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));
			if (ret)
				break;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (have_bucket_gens_key && !ret)
		ret = commit_do(&trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_LAZY_RW,
			__bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));

	return ret;
}
int bch2_bucket_gens_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_bucket_gens *g;
	struct bch_dev *ca;
	u64 b;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_bucket_gens, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
		u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

		if (k.k->type != KEY_TYPE_bucket_gens)
			continue;

		g = bkey_s_c_to_bucket_gens(k).v;

		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_exists2(c, k.k->p.inode))
			continue;

		ca = bch_dev_bkey_exists(c, k.k->p.inode);

		for (b = max_t(u64, ca->mi.first_bucket, start);
		     b < min_t(u64, ca->mi.nbuckets, end);
		     b++)
			*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));

	return ret;
}
/* Free space/discard btree: */
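/*
 * Each alloc key is mirrored into two secondary indices, kept in sync by
 * bch2_bucket_do_index() below: a bucket with data_type
 * BCH_DATA_need_discard gets a KEY_TYPE_set entry at the same position in
 * the need_discard btree, and a bucket with data_type BCH_DATA_free gets one
 * in the freespace btree at alloc_freespace_pos(), which folds the bucket's
 * generation bits into the key so stale freespace entries can be detected.
 */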
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	bch2_trans_iter_init(trans, &iter, btree,
			     bkey_start_pos(&k->k),
			     BTREE_ITER_INTENT);
	old = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(old);
	if (ret)
		goto err;

	if (ca->mi.freespace_initialized &&
	    test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags) &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s btree (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_ids[btree],
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_bucket_gens, pos,
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
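/*
 * Transactional trigger for alloc keys: given the old and new versions of a
 * key, it updates the derived btrees (need_discard, freespace, bucket_gens
 * and the lrus) to match the bucket's new state.
 */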
int bch2_trans_mark_alloc(struct btree_trans *trans,
			  enum btree_id btree_id, unsigned level,
			  struct bkey_s_c old, struct bkey_i *new,
			  unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a_convert, *new_a;
	const struct bch_alloc_v4 *old_a;
	u64 old_lru, new_lru;
	int ret = 0;

	/*
	 * Deletion only happens in the device removal path, with
	 * BTREE_TRIGGER_NORUN:
	 */
	BUG_ON(new->k.type != KEY_TYPE_alloc_v4);

	old_a = bch2_alloc_to_v4(old, &old_a_convert);
	new_a = &bkey_i_to_alloc_v4(new)->v;

	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

	if (new_a->dirty_sectors > old_a->dirty_sectors ||
	    new_a->cached_sectors > old_a->cached_sectors) {
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
		new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
		SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
	}

	if (data_type_is_empty(new_a->data_type) &&
	    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
	    !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
		new_a->gen++;
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
	}

	if (old_a->data_type != new_a->data_type ||
	    (new_a->data_type == BCH_DATA_free &&
	     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
		ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
			bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
		if (ret)
			return ret;
	}

	if (new_a->data_type == BCH_DATA_cached &&
	    !new_a->io_time[READ])
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

	old_lru = alloc_lru_idx_read(*old_a);
	new_lru = alloc_lru_idx_read(*new_a);

	if (old_lru != new_lru) {
		ret = bch2_lru_change(trans, new->k.p.inode,
				      bucket_to_u64(new->k.p),
				      old_lru, new_lru);
		if (ret)
			return ret;
	}

	new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
					bch_dev_bkey_exists(c, new->k.p.inode));

	if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
		ret = bch2_lru_change(trans,
				      BCH_LRU_FRAGMENTATION_START,
				      bucket_to_u64(new->k.p),
				      old_a->fragmentation_lru, new_a->fragmentation_lru);
		if (ret)
			return ret;
	}

	if (old_a->gen != new_a->gen) {
		ret = bch2_bucket_gen_update(trans, new->k.p, new_a->gen);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
 * extents style btrees, but works on non-extents btrees:
 */
struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		if (!bpos_eq(iter->path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(iter->path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}
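/*
 * Sketch of the iteration pattern this enables (bch2_dev_freespace_init()
 * below is a real user): the caller sees every live key plus a synthesized
 * zero-type "hole" key spanning each gap, so both can be handled in a single
 * pass:
 *
 *	k = bch2_get_key_or_hole(&iter, end, &hole);
 *	if (!bkey_err(k)) {
 *		if (k.k->type)
 *			... process a live alloc key ...
 *		else
 *			... process the hole from bkey_start_pos(k.k) to k.k->p ...
 *	}
 */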
static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
{
	struct bch_dev *ca;
	unsigned iter;

	if (bch2_dev_bucket_exists(c, *bucket))
		return true;

	if (bch2_dev_exists2(c, bucket->inode)) {
		ca = bch_dev_bkey_exists(c, bucket->inode);

		if (bucket->offset < ca->mi.first_bucket) {
			bucket->offset = ca->mi.first_bucket;
			return true;
		}

		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	iter = bucket->inode;
	ca = __bch2_next_dev(c, &iter, NULL);
	if (ca)
		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
	rcu_read_unlock();

	return ca != NULL;
}
struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	if (!k.k->type) {
		struct bpos bucket = bkey_start_pos(k.k);

		if (!bch2_dev_bucket_exists(c, bucket)) {
			if (!next_bucket(c, &bucket))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, bucket);
			goto again;
		}

		if (!bch2_dev_bucket_exists(c, k.k->p)) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

			bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
		}
	}

	return k;
}
static int bch2_check_alloc_key(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				struct btree_iter *alloc_iter,
				struct btree_iter *discard_iter,
				struct btree_iter *freespace_iter,
				struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != discard_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[discard_key_type],
		      (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != freespace_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[freespace_key_type],
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (a->gen != alloc_gen(k, gens_offset) &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n"
		      "  %s",
		      alloc_gen(k, gens_offset), a->gen,
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
					   struct bpos start,
					   struct bpos *end,
					   struct btree_iter *freespace_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	ca = bch_dev_bkey_exists(c, start.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (k.k->type != KEY_TYPE_set &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "hole in alloc btree missing in freespace btree\n"
		      "  device %llu buckets %llu-%llu",
		      freespace_iter->pos.inode,
		      freespace_iter->pos.offset,
		      end->offset))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= KEY_TYPE_set;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
					     struct bpos start,
					     struct bpos *end,
					     struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	if (c->sb.version < bcachefs_metadata_version_bucket_gens &&
	    !c->opts.version_upgrade)
		return 0;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end,  &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], c,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(k);
			if (ret)
				goto err;

			memcpy(k, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
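/*
 * A key in the need_discard or freespace btrees packs the bucket number into
 * the low 56 bits of the offset; freespace keys additionally store the
 * generation bits (alloc_freespace_genbits()) in the high 8 bits.
 * bch2_check_discard_freespace_key() below splits them apart with the
 * (~0ULL << 56) masks before looking up the alloc key.
 */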
static int bch2_check_discard_freespace_key(struct btree_trans *trans,
					    struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
		goto delete;

	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(alloc_k);
	if (ret)
		goto err;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)), c,
			"%s\n  incorrectly set in %s index (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_ids[iter->btree_id],
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret = bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0);
	goto out;
}
/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static int bch2_check_bucket_gens_key(struct btree_trans *trans,
				      struct btree_iter *iter,
				      struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	struct bch_dev *ca;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
			"bucket_gens key for invalid device:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	ca = bch_dev_bkey_exists(c, k.k->p.inode);
	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets, c,
			"bucket_gens key for invalid buckets:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *k;

		k = bch2_trans_kmalloc(trans, sizeof(g));
		ret = PTR_ERR_OR_ZERO(k);
		if (ret)
			goto out;

		memcpy(k, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, k, 0);
	}
out:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_PREFETCH);

	while (1) {
		struct bpos next;

		bch2_trans_begin(&trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(&trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(&trans,
						    bkey_start_pos(k.k),
						    &next,
						    &freespace_iter) ?:
			      bch2_check_alloc_hole_bucket_gens(&trans,
						    bkey_start_pos(k.k),
						    &next,
						    &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_LAZY_RW);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &bucket_gens_iter);
	bch2_trans_iter_exit(&trans, &freespace_iter);
	bch2_trans_iter_exit(&trans, &discard_iter);
	bch2_trans_iter_exit(&trans, &iter);

	if (ret < 0)
		goto err;

	ret = for_each_btree_key_commit(&trans, iter,
			BTREE_ID_need_discard, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
		bch2_check_discard_freespace_key(&trans, &iter)) ?:
	      for_each_btree_key_commit(&trans, iter,
			BTREE_ID_freespace, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
		bch2_check_discard_freespace_key(&trans, &iter)) ?:
	      for_each_btree_key_commit(&trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
		bch2_check_bucket_gens_key(&trans, &iter, k));
err:
	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != BCH_DATA_cached)
		return 0;

	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     lru_pos(alloc_k.k->p.inode,
				     bucket_to_u64(alloc_k.k->p),
				     a->io_time[READ]), 0);
	k = bch2_btree_iter_peek_slot(&lru_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(!a->io_time[READ], c,
			"cached bucket with read_time 0\n"
			"  %s",
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
	    fsck_err_on(k.k->type != KEY_TYPE_set, c,
			"missing lru entry\n"
			"  %s",
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		u64 read_time = a->io_time[READ] ?:
			atomic64_read(&c->io_clock[READ].now);

		ret = bch2_lru_set(trans,
				   alloc_k.k->p.inode,
				   bucket_to_u64(alloc_k.k->p),
				   read_time);
		if (ret)
			goto err;

		if (a->io_time[READ] != read_time) {
			struct bkey_i_alloc_v4 *a_mut =
				bch2_alloc_to_v4_mut(trans, alloc_k);
			ret = PTR_ERR_OR_ZERO(a_mut);
			if (ret)
				goto err;

			a_mut->v.io_time[READ] = read_time;
			ret = bch2_trans_update(trans, alloc_iter,
						&a_mut->k_i, BTREE_TRIGGER_NORUN);
			if (ret)
				goto err;
		}
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
			POS_MIN, BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
		bch2_check_alloc_to_lru_ref(&trans, &iter));

	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}
static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   u64 *seen,
				   u64 *open,
				   u64 *need_journal_commit,
				   u64 *discarded)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	ca = bch_dev_bkey_exists(c, pos.inode);
	if (!percpu_ref_tryget(&ca->io_ref)) {
		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
		return 0;
	}

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		(*open)++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		(*need_journal_commit)++;
		goto out;
	}

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     need_discard_iter->pos,
			     BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
		a->v.gen++;
		SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
		goto write;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
			bch2_trans_inconsistent(trans,
				"clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
				"%s",
				a->v.journal_seq,
				c->journal.flushed_seq_ondisk,
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
			ret = -EIO;
		}
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
			bch2_trans_inconsistent(trans,
				"bucket incorrectly set in need_discard btree\n"
				"%s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
			ret = -EIO;
		}

		goto out;
	}

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	a->v.data_type = alloc_data_type(a->v, a->v.data_type);
write:
	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
	if (ret)
		goto out;

	this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
	(*discarded)++;
out:
	(*seen)++;
	bch2_trans_iter_exit(trans, &iter);
	percpu_ref_put(&ca->io_ref);
	printbuf_exit(&buf);
	return ret;
}
static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = for_each_btree_key2(&trans, iter,
			BTREE_ID_need_discard, POS_MIN, 0, k,
		bch2_discard_one_bucket(&trans, &iter, &discard_pos_done,
					&seen,
					&open,
					&need_journal_commit,
					&discarded));

	bch2_trans_exit(&trans);

	if (need_journal_commit * 2 > seen)
		bch2_journal_flush_async(&c->journal, NULL);

	bch2_write_ref_put(c, BCH_WRITE_REF_discard);

	trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
			      bch2_err_str(ret));
}
void bch2_do_discards(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
	    !queue_work(c->write_ref_wq, &c->discard_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
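/*
 * bch2_do_discards() (and bch2_do_invalidates() below) take a write ref
 * before queueing the work item, dropping it immediately if the work was
 * already queued; otherwise the ref is dropped by the work function itself
 * when it finishes, which keeps the filesystem from going read-only while
 * work is still pending.
 */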
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type		= 0;
	a->v.dirty_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE]	= atomic64_read(&c->io_clock[WRITE].now);

	ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
				BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}
static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct bch_dev *ca;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	unsigned i;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	ret = bch2_btree_write_buffer_flush(&trans);
	if (ret)
		goto err;

	for_each_member_device(ca, c, i) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_lru,
				lru_pos(ca->dev_idx, 0, 0),
				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
				BTREE_ITER_INTENT, k,
			invalidate_one_bucket(&trans, &iter, k, &nr_to_invalidate));

		if (ret < 0) {
			percpu_ref_put(&ca->ref);
			break;
		}
	}
err:
	bch2_trans_exit(&trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_do_invalidates(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
	    !queue_work(c->write_ref_wq, &c->invalidate_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
				   unsigned long *last_updated)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, ca->mi.nbuckets);
	struct bch_member *m;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, ca->mi.first_bucket),
			     BTREE_ITER_PREFETCH);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (*last_updated + HZ * 10 < jiffies) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			*last_updated = jiffies;
		}

		bch2_trans_begin(&trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret =   bch2_bucket_do_index(&trans, k, a, true) ?:
				bch2_trans_commit(&trans, NULL, NULL,
						  BTREE_INSERT_LAZY_RW|
						  BTREE_INSERT_NOFAIL);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(&trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type	= KEY_TYPE_set;
			freespace->k.p		= k.k->p;
			freespace->k.size	= k.k->size;

			ret =   __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(&trans, NULL, NULL,
						  BTREE_INSERT_LAZY_RW|
						  BTREE_INSERT_NOFAIL);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);

	if (ret < 0) {
		bch_err(ca, "error initializing free space: %s", bch2_err_str(ret));
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}
int bch2_fs_freespace_init(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;
	bool doing_init = false;
	unsigned long last_updated = jiffies;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */
	for_each_member_device(ca, c, i) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, &last_updated);
		if (ret) {
			percpu_ref_put(&ca->ref);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);

		bch_verbose(c, "done initializing freespace");
	}

	return ret;
}
/* Bucket IO clocks: */
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/* Startup/shutdown (ro/rw): */
void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}
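/*
 * Illustrative numbers for the reserve calculation above (assumed inputs,
 * not from the source): with ca->nr_btree_reserve == 7 and a device of
 * 65536 buckets of 1024 sectors, dev_reserve = (7 * 2 + (65536 >> 6) + 3)
 * * 1024 = 1065984 sectors, counted twice in reserved_sectors.
 */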
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}