// SPDX-License-Identifier: GPL-2.0

#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets_waiting_for_journal.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

#include <trace/events/bcachefs.h>

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
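/*
 * The x-macro above expands BCH_ALLOC_FIELDS_V1() into a table mapping
 * each v1 field to its on-disk width in bytes - a field declared with
 * 16 bits occupies 2 bytes. alloc_field_v1_get()/alloc_field_v1_put()
 * below index this table while walking the packed representation.
 */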
struct bkey_alloc_unpacked {
#define x(_name, _bits)	u##_bits	_name;

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	if (!(a->fields & (1 << field)))

	v = *((const u8 *) *p);

static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
				      unsigned field, u64 v)
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	a->v.fields |= 1 << field;

	*((__le16 *) *p) = cpu_to_le16(v);
	*((__le32 *) *p) = cpu_to_le32(v);
	*((__le64 *) *p) = cpu_to_le64(v);
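/*
 * The three stores above are the 2-, 4- and 8-byte arms of a switch on
 * the field width from BCH_ALLOC_V1_FIELD_BYTES[]: each v1 field is
 * written little-endian at exactly its table width, and is only present
 * in the value at all when its bit is set in the fields bitmap.
 */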
static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;

#define x(_name, _bits)	out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;

	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (v != out->_name)					\
	BCH_ALLOC_FIELDS_V2()
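/*
 * Each x() expansion varint-decodes the next field if it was written
 * (fieldnr < a.v->nr_fields); the "v != out->_name" check after
 * assignment catches decoded values too wide for the corresponding
 * field of struct bkey_alloc_unpacked.
 */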
static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;

	out->oldest_gen		= a.v->oldest_gen;
	out->data_type		= a.v->data_type;
	out->need_discard	= BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen	= BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq	= le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (v != out->_name)					\
	BCH_ALLOC_FIELDS_V2()
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
	struct bkey_alloc_unpacked ret = { .gen = 0 };

		bch2_alloc_unpack_v1(&ret, k);
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));

int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
		prt_printf(err, "incorrect value size (%zu < %u)",
			   bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));

int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v2(&u, k)) {
		prt_printf(err, "unpack error");

int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v3(&u, k)) {
		prt_printf(err, "unpack error");

int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);

	if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
		prt_printf(err, "bad val size (%lu != %u)",
			   bkey_val_u64s(k.k), alloc_v4_u64s(a.v));

	if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
	    BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
		prt_printf(err, "invalid backpointers_start");

	/*
	 * XXX this is wrong, we'll be checking updates that happened
	 * before BCH_FS_CHECK_BACKPOINTERS_DONE
	 */
	if (rw == WRITE && test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
		unsigned i, bp_len = 0;

		for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
			bp_len += alloc_v4_backpointers_c(a.v)[i].bucket_len;

		if (bp_len > a.v->dirty_sectors) {
			prt_printf(err, "too many backpointers");

	if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
		prt_printf(err, "invalid data type (got %u should be %u)",
			   a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

	switch (a.v->data_type) {
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		if (a.v->dirty_sectors ||
		    a.v->cached_sectors ||
			prt_printf(err, "empty data type free but have data");
	case BCH_DATA_journal:
	case BCH_DATA_parity:
		if (!a.v->dirty_sectors) {
			prt_printf(err, "data_type %s but dirty_sectors==0",
				   bch2_data_types[a.v->data_type]);
	case BCH_DATA_cached:
		if (!a.v->cached_sectors ||
		    a.v->dirty_sectors ||
			prt_printf(err, "data type inconsistency");

		if (!a.v->io_time[READ] &&
		    test_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags)) {
			prt_printf(err, "cached bucket with read_time == 0");
	case BCH_DATA_stripe:
			prt_printf(err, "data_type %s but stripe==0",
				   bch2_data_types[a.v->data_type]);

static inline u64 swab40(u64 x)
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
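/*
 * swab40() byte-reverses a 40-bit value kept in the low five bytes of a
 * u64: swab40(0x0102030405) == 0x0504030201, with the high 24 bits of
 * the result zero. It exists because bch_backpointer's bucket_offset is
 * a 40-bit field, which a full swab64() would mangle.
 */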
void bch2_alloc_v4_swab(struct bkey_s k)
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset	= swab40(bp->bucket_offset);
		bp->bucket_len		= swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = &_a;
	const struct bch_backpointer *bps;

	if (k.k->type == KEY_TYPE_alloc_v4)
		a = bkey_s_c_to_alloc_v4(k).v;
	else
		bch2_alloc_to_v4(k, &_a);

	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type %s",
		   a->gen, a->oldest_gen, bch2_data_types[a->data_type]);
	prt_printf(out, "journal_seq %llu", a->journal_seq);
	prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
	prt_printf(out, "cached_sectors %u", a->cached_sectors);
	prt_printf(out, "stripe %u", a->stripe);
	prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
	prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a));
	printbuf_indent_add(out, 2);

	bps = alloc_v4_backpointers_c(a);
	for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a); i++) {
		bch2_backpointer_to_text(out, &bps[i]);

	printbuf_indent_sub(out, 4);

void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
	if (k.k->type == KEY_TYPE_alloc_v4) {
		*out = *bkey_s_c_to_alloc_v4(k).v;

		d = (int) BCH_ALLOC_V4_U64s -
		    (int) (BCH_ALLOC_V4_BACKPOINTERS_START(out) ?: BCH_ALLOC_V4_U64s_V0);
		if (unlikely(d > 0)) {
			memset((u64 *) out + BCH_ALLOC_V4_BACKPOINTERS_START(out),
			SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
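/*
 * bch2_alloc_to_v4() is the read-side normalization point: v1 keys
 * (fixed-width packed fields) and v2/v3 keys (varint-packed) are all
 * expanded to struct bch_alloc_v4, so everything past unpacking deals
 * with a single in-memory representation.
 */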
static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
	struct bkey_i_alloc_v4 *ret;
	unsigned bytes = k.k->type == KEY_TYPE_alloc_v4
		: sizeof(struct bkey_i_alloc_v4);

	/*
	 * Reserve space for one more backpointer here:
	 * Not sketchy at doing it this way, nope...
	 */
	ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));

	if (k.k->type == KEY_TYPE_alloc_v4) {
		struct bch_backpointer *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		memmove(dst, src, BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v) *
			sizeof(struct bch_backpointer));
		memset(src, 0, dst - src);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		bch2_alloc_to_v4(k, &ret->v);

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    BCH_ALLOC_V4_BACKPOINTERS_START(bkey_s_c_to_alloc_v4(k).v) == BCH_ALLOC_V4_U64s) {
		/*
		 * Reserve space for one more backpointer here:
		 * Not sketchy at doing it this way, nope...
		 */
		struct bkey_i_alloc_v4 *ret =
			bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));

		bkey_reassemble(&ret->k_i, k);

	return __bch2_alloc_to_v4_mut(trans, k);

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
	return bch2_alloc_to_v4_mut_inlined(trans, k);
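/*
 * The mut helpers are split so the common case stays cheap: an alloc_v4
 * key whose backpointers already start at BCH_ALLOC_V4_U64s is copied
 * in the inlined fast path above, while format conversion lives in the
 * noinline __bch2_alloc_to_v4_mut() slow path.
 */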
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
	struct bkey_i_alloc_v4 *a;

	bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_WITH_UPDATES|
	k = bch2_btree_iter_peek_slot(iter);
		bch2_trans_iter_exit(trans, iter);

	a = bch2_alloc_to_v4_mut_inlined(trans, k);
		bch2_trans_iter_exit(trans, iter);

int bch2_alloc_read(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter iter;
	struct bch_alloc_v4 a;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))

		ca = bch_dev_bkey_exists(c, k.k->p.inode);
		bch2_alloc_to_v4(k, &a);

		*bucket_gen(ca, k.k->p.offset) = a.gen;
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

		bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));

/* Free space/discard btree: */

static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)

	k = bch2_trans_kmalloc(trans, sizeof(*k));

	k->k.type = new_type;

	switch (a->data_type) {
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;

	bch2_trans_iter_init(trans, &iter, btree,
			     bkey_start_pos(&k->k),
	old = bch2_btree_iter_peek_slot(&iter);

	if (ca->mi.freespace_initialized &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s btree (got %s should be %s)\n"
			set ? "setting" : "clearing",
			bch2_btree_ids[btree],
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {

	ret = bch2_trans_update(trans, &iter, k, 0);
	bch2_trans_iter_exit(trans, &iter);

int bch2_trans_mark_alloc(struct btree_trans *trans,
			  enum btree_id btree_id, unsigned level,
			  struct bkey_s_c old, struct bkey_i *new,
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a, *new_a;
	u64 old_lru, new_lru;

	/*
	 * Deletion only happens in the device removal path, with
	 * BTREE_TRIGGER_NORUN:
	 */
	BUG_ON(new->k.type != KEY_TYPE_alloc_v4);

	bch2_alloc_to_v4(old, &old_a);
	new_a = &bkey_i_to_alloc_v4(new)->v;

	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

	if (new_a->dirty_sectors > old_a.dirty_sectors ||
	    new_a->cached_sectors > old_a.cached_sectors) {
		new_a->io_time[READ]  = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
		new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
		SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);

	if (data_type_is_empty(new_a->data_type) &&
	    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
	    !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);

	if (old_a.data_type != new_a->data_type ||
	    (new_a->data_type == BCH_DATA_free &&
	     alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
		ret = bch2_bucket_do_index(trans, old, &old_a, false) ?:
			bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);

	if (new_a->data_type == BCH_DATA_cached &&
	    !new_a->io_time[READ])
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

	old_lru = alloc_lru_idx(old_a);
	new_lru = alloc_lru_idx(*new_a);

	if (old_lru != new_lru) {
		ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
				      old_lru, &new_lru, old);

		if (new_a->data_type == BCH_DATA_cached)
			new_a->io_time[READ] = new_lru;
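	/*
	 * This trigger keeps the auxiliary indices transactionally in sync
	 * with the alloc key: freespace/need_discard entries are moved
	 * whenever data_type (or, for free buckets, the freespace genbits)
	 * changes, and the LRU entry is repositioned when the lru index
	 * changes. For cached buckets io_time[READ] doubles as the LRU
	 * position, so it's updated to wherever bch2_lru_change() actually
	 * placed the entry.
	 */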
static int bch2_check_alloc_key(struct btree_trans *trans,
				struct btree_iter *alloc_iter,
				struct btree_iter *discard_iter,
				struct btree_iter *freespace_iter)
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a;
	unsigned discard_key_type, freespace_key_type;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;

	alloc_k = bch2_dev_bucket_exists(c, alloc_iter->pos)
		? bch2_btree_iter_peek_slot(alloc_iter)
		: bch2_btree_iter_peek(alloc_iter);
	ret = bkey_err(alloc_k);

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)

	bch2_alloc_to_v4(alloc_k, &a);

	discard_key_type = a.data_type == BCH_DATA_need_discard
	freespace_key_type = a.data_type == BCH_DATA_free

	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, a));

	k = bch2_btree_iter_peek_slot(discard_iter);

	if (k.k->type != discard_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[discard_key_type],
		      (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);

	k = bch2_btree_iter_peek_slot(freespace_iter);

	if (k.k->type != freespace_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[freespace_key_type],
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);

static int bch2_check_discard_freespace_key(struct btree_trans *trans,
					    struct btree_iter *iter)
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
	struct printbuf buf = PRINTBUF;

	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);
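	/*
	 * In the freespace btree a key's offset packs two things: the low
	 * 56 bits are the bucket number, the high 8 bits are the generation
	 * bits (alloc_freespace_genbits()), so the two masks above split
	 * iter->pos back apart - e.g. genbits 0x03 and bucket 0x1000 pack
	 * to offset 0x0300000000001000. need_discard keys carry no genbits,
	 * so there the high bits are simply zero.
	 */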
	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))

	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(alloc_k);

	bch2_alloc_to_v4(alloc_k, &a);

	if (fsck_err_on(a.data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(a)), c,
			"%s\n  incorrectly set in %s index (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_ids[iter->btree_id],
			a.data_type == state,
			genbits >> 56, alloc_freespace_genbits(a) >> 56))

	bch2_trans_iter_exit(trans, &alloc_iter);

	ret = bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0);

int bch2_check_alloc_info(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter iter, discard_iter, freespace_iter;

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);

	ret = commit_do(&trans, NULL, NULL,
			BTREE_INSERT_LAZY_RW,
			bch2_check_alloc_key(&trans, &iter,

		bch2_btree_iter_advance(&iter);

	bch2_trans_iter_exit(&trans, &freespace_iter);
	bch2_trans_iter_exit(&trans, &discard_iter);
	bch2_trans_iter_exit(&trans, &iter);

	ret = for_each_btree_key_commit(&trans, iter,
			BTREE_ID_need_discard, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
			bch2_check_discard_freespace_key(&trans, &iter)) ?:
	      for_each_btree_key_commit(&trans, iter,
			BTREE_ID_freespace, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
			bch2_check_discard_freespace_key(&trans, &iter));

	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	ret = bkey_err(alloc_k);

	bch2_alloc_to_v4(alloc_k, &a);

	if (a.data_type != BCH_DATA_cached)

	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     POS(alloc_k.k->p.inode, a.io_time[READ]), 0);
	k = bch2_btree_iter_peek_slot(&lru_iter);

	if (fsck_err_on(!a.io_time[READ], c,
			"cached bucket with read_time 0\n"
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
	    fsck_err_on(k.k->type != KEY_TYPE_lru ||
			le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
			"incorrect/missing lru entry\n"
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
		u64 read_time = a.io_time[READ];

		if (!a.io_time[READ])
			a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);

		ret = bch2_lru_set(trans,

		if (a.io_time[READ] != read_time) {
			struct bkey_i_alloc_v4 *a_mut =
				bch2_alloc_to_v4_mut(trans, alloc_k);
			ret = PTR_ERR_OR_ZERO(a_mut);

			a_mut->v.io_time[READ] = a.io_time[READ];
			ret = bch2_trans_update(trans, alloc_iter,
						&a_mut->k_i, BTREE_TRIGGER_NORUN);

	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf2);
	printbuf_exit(&buf);

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter iter;

	bch2_trans_init(&trans, c, 0, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
			POS_MIN, BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
			bch2_check_alloc_to_lru_ref(&trans, &iter));

	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   u64 *need_journal_commit,
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool did_discard = false;

	ca = bch_dev_bkey_exists(c, pos.inode);
	if (!percpu_ref_tryget(&ca->io_ref)) {
		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					     c->journal.flushed_seq_ondisk,
					     pos.inode, pos.offset)) {
		(*need_journal_commit)++;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     need_discard_iter->pos,
	k = bch2_btree_iter_peek_slot(&iter);

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);

	if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
		SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);

	if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans,
			"clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
			c->journal.flushed_seq_ondisk,
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {

	if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans,
			"bucket incorrectly set in need_discard btree\n"
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {

	if (bkey_cmp(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
		ret = bch2_trans_relock(trans);

		*discard_pos_done = iter.pos;

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	a->v.data_type = alloc_data_type(a->v, a->v.data_type);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);

	this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);

	bch2_trans_iter_exit(trans, &iter);
	percpu_ref_put(&ca->io_ref);
	printbuf_exit(&buf);

static void bch2_do_discards_work(struct work_struct *work)
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct btree_trans trans;
	struct btree_iter iter;
	u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
	struct bpos discard_pos_done = POS_MAX;

	bch2_trans_init(&trans, c, 0, 0);

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = for_each_btree_key2(&trans, iter,
			BTREE_ID_need_discard, POS_MIN, 0, k,
			bch2_discard_one_bucket(&trans, &iter, &discard_pos_done,
						&need_journal_commit,

	bch2_trans_exit(&trans);
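	/*
	 * If more than half the buckets we saw weren't discardable only
	 * because their deallocation hasn't hit the journal yet
	 * (bch2_bucket_needs_journal_commit() above), the journal is the
	 * bottleneck - kick off an async flush so the next pass can make
	 * progress:
	 */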
	if (need_journal_commit * 2 > seen)
		bch2_journal_flush_async(&c->journal, NULL);

	percpu_ref_put(&c->writes);

	trace_discard_buckets(c, seen, open, need_journal_commit, discarded,

void bch2_do_discards(struct bch_fs *c)
	if (percpu_ref_tryget_live(&c->writes) &&
	    !queue_work(system_long_wq, &c->discard_work))
		percpu_ref_put(&c->writes);

static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter, struct bkey_s_c k,
				 unsigned dev_idx, s64 *nr_to_invalidate)
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	unsigned cached_sectors;

	if (*nr_to_invalidate <= 0 || k.k->p.inode != dev_idx)

	if (k.k->type != KEY_TYPE_lru) {
		prt_printf(&buf, "non lru key in lru btree:\n  ");
		bch2_bkey_val_to_text(&buf, c, k);

		if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
			bch_err(c, "%s", buf.buf);
		} else {
			bch2_trans_inconsistent(trans, "%s", buf.buf);

	bucket = POS(dev_idx, le64_to_cpu(bkey_s_c_to_lru(k).v->idx));

	a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
	ret = PTR_ERR_OR_ZERO(a);

	if (k.k->p.offset != alloc_lru_idx(a->v)) {
		prt_printf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n  ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);

		if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
			bch_err(c, "%s", buf.buf);
		} else {
			bch2_trans_inconsistent(trans, "%s", buf.buf);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.dirty_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE]	= atomic64_read(&c->io_clock[WRITE].now);
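	/*
	 * Invalidation strips the bucket back to empty: sector counts are
	 * zeroed and both io clocks are reset to "now"; the
	 * BTREE_TRIGGER_BUCKET_INVALIDATE flag on the update below lets
	 * the transactional trigger tell this apart from an ordinary
	 * alloc key change.
	 */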
	ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
				BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
	      bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;

	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);

static void bch2_do_invalidates_work(struct work_struct *work)
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct btree_trans trans;
	struct btree_iter iter;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_member_device(ca, c, i) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		ret = for_each_btree_key2(&trans, iter, BTREE_ID_lru,
				POS(ca->dev_idx, 0), BTREE_ITER_INTENT, k,
				invalidate_one_bucket(&trans, &iter, k, ca->dev_idx,
						      &nr_to_invalidate));

			percpu_ref_put(&ca->ref);

	bch2_trans_exit(&trans);
	percpu_ref_put(&c->writes);

void bch2_do_invalidates(struct bch_fs *c)
	if (percpu_ref_tryget_live(&c->writes) &&
	    !queue_work(system_long_wq, &c->invalidate_work))
		percpu_ref_put(&c->writes);

static int bucket_freespace_init(struct btree_trans *trans, struct btree_iter *iter,
				 struct bkey_s_c k, struct bch_dev *ca)
	struct bch_alloc_v4 a;

	if (iter->pos.offset >= ca->mi.nbuckets)

	bch2_alloc_to_v4(k, &a);
	return bch2_bucket_do_index(trans, k, &a, true);

static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
	struct btree_trans trans;
	struct btree_iter iter;
	struct bch_member *m;

	bch2_trans_init(&trans, c, 0, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
			POS(ca->dev_idx, ca->mi.first_bucket),
			BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_LAZY_RW,
			bucket_freespace_init(&trans, &iter, k, ca));

	bch2_trans_exit(&trans);

		bch_err(ca, "error initializing free space: %s", bch2_err_str(ret));

	mutex_lock(&c->sb_lock);
	m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

int bch2_fs_freespace_init(struct bch_fs *c)
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this
	 * on mount:
	 */
	for_each_member_device(ca, c, i) {
		if (ca->mi.freespace_initialized)

			bch_info(c, "initializing freespace");

		ret = bch2_dev_freespace_init(c, ca);
			percpu_ref_put(&ca->ref);

	mutex_lock(&c->sb_lock);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	bch_verbose(c, "done initializing freespace");
/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)

	a->v.io_time[rw] = now;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL, 0);

	bch2_trans_iter_exit(trans, &iter);

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again once its
		 * reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);
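	/*
	 * gc_reserve is in 512-byte sectors: gc_reserve_bytes >> 9 converts
	 * bytes to sectors, while the percentage path scales capacity
	 * directly - e.g. gc_reserve_percent = 8 on a 1 TB filesystem
	 * yields roughly 80 GB worth of reserved sectors.
	 */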
	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up anyone who was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
		spin_unlock(&ob->lock);

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	mutex_unlock(&c->btree_reserve_cache_lock);

		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in-flight writes: */
	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);

void bch2_fs_allocator_background_init(struct bch_fs *c)
	spin_lock_init(&c->freelist_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);