// SPDX-License-Identifier: GPL-2.0
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets_waiting_for_journal.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
/* Persistent alloc info: */
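/*
 * Bucket allocation info is persisted in the alloc btree, and has gone
 * through several on-disk formats: v1 packs fields at fixed byte widths,
 * v2 and v3 encode fields as varints, and v4 is a plain struct. Everything
 * below that reads alloc keys converts the older formats to struct
 * bch_alloc_v4.
 */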
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits)	[BCH_ALLOC_FIELD_V1_##name] = bits / 8,

const char * const bch2_bucket_states[] = {

struct bkey_alloc_unpacked {
#define x(_name, _bits)	u##_bits _name;
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	if (!(a->fields & (1 << field)))

	v = *((const u8 *) *p);

static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
				      unsigned field, u64 v)
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	a->v.fields |= 1 << field;

	*((__le16 *) *p) = cpu_to_le16(v);
	*((__le32 *) *p) = cpu_to_le32(v);
	*((__le64 *) *p) = cpu_to_le64(v);
static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;

#define x(_name, _bits)	out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;

	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\

		if (v != out->_name)					\

	BCH_ALLOC_FIELDS_V2()
static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;

	out->oldest_gen		= a.v->oldest_gen;
	out->data_type		= a.v->data_type;
	out->need_discard	= BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen	= BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq	= le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\

		if (v != out->_name)					\

	BCH_ALLOC_FIELDS_V2()
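/* Unpack an alloc key of any on-disk version, dispatching on key type: */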
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
	struct bkey_alloc_unpacked ret = {
		.bucket	= k.k->p.offset,

		bch2_alloc_unpack_v1(&ret, k);
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
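/*
 * Convert an alloc key of any version to the current v4 representation:
 * v4 values are copied verbatim, older versions are unpacked field by field:
 */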
void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
	if (k.k->type == KEY_TYPE_alloc_v4) {
		*out = *bkey_s_c_to_alloc_v4(k).v;
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
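/*
 * As above, but returning a mutable v4 copy allocated from the
 * transaction's memory pool, suitable for updating and reinserting:
 */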
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
	struct bkey_i_alloc_v4 *ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		ret = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
		bkey_reassemble(&ret->k_i, k);
		ret = bch2_trans_kmalloc(trans, sizeof(*ret));
		bkey_alloc_v4_init(&ret->k_i);
		bch2_alloc_to_v4(k, &ret->v);
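/*
 * Look up the alloc key at @pos and return a mutable v4 copy, leaving
 * @iter pointing at the key so the caller can update it; on success the
 * caller is responsible for exiting the iterator:
 */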
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
	struct bkey_i_alloc_v4 *a;

	bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_WITH_UPDATES|
	k = bch2_btree_iter_peek_slot(iter);
		bch2_trans_iter_exit(trans, iter);

	a = bch2_alloc_to_v4_mut(trans, k);
		bch2_trans_iter_exit(trans, iter);
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));

int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
		pr_buf(err, "incorrect value size (%zu < %u)",
		       bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));

int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v2(&u, k)) {
		pr_buf(err, "unpack error");

int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v3(&u, k)) {
		pr_buf(err, "unpack error");

int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
	if (bkey_val_bytes(k.k) != sizeof(struct bch_alloc_v4)) {
		pr_buf(err, "bad val size (%zu != %zu)",
		       bkey_val_bytes(k.k), sizeof(struct bch_alloc_v4));
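/* Byte swap an alloc_v4 value, for filesystems of the opposite endianness: */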
void bch2_alloc_v4_swab(struct bkey_s k)
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
	struct bch_alloc_v4 a;

	bch2_alloc_to_v4(k, &a);

	pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu need_discard %llu",
	       a.gen, a.oldest_gen, bch2_data_types[a.data_type],
	       a.journal_seq, BCH_ALLOC_V4_NEED_DISCARD(&a));
	pr_buf(out, " dirty_sectors %u",	a.dirty_sectors);
	pr_buf(out, " cached_sectors %u",	a.cached_sectors);
	pr_buf(out, " stripe %u",		a.stripe);
	pr_buf(out, " stripe_redundancy %u",	a.stripe_redundancy);
	pr_buf(out, " read_time %llu",		a.io_time[READ]);
	pr_buf(out, " write_time %llu",		a.io_time[WRITE]);
int bch2_alloc_read(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter iter;
	struct bch_alloc_v4 a;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ca = bch_dev_bkey_exists(c, k.k->p.inode);
		bch2_alloc_to_v4(k, &a);

		*bucket_gen(ca, k.k->p.offset) = a.gen;
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading alloc info: %i", ret);
/* Free space/discard btree: */
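/*
 * The need_discard and freespace btrees are indexes into the alloc btree:
 * a bucket appears in need_discard while it's awaiting a discard, and in
 * freespace once it's available for allocation. Freespace keys embed the
 * bucket's generation bits in the high bits of the key offset (see
 * alloc_freespace_pos()), so stale index entries can be detected.
 * bch2_bucket_do_index() adds or removes the index entry for one bucket:
 */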
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				struct bch_alloc_v4 a,
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	enum bucket_state state = bucket_state(a);
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;

	if (state != BUCKET_free &&
	    state != BUCKET_need_discard)

	k = bch2_trans_kmalloc(trans, sizeof(*k));

	k->k.type = new_type;

		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, a);
		bch2_key_resize(&k->k, 1);
	case BUCKET_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;

	bch2_trans_iter_init(trans, &iter, btree,
			     bkey_start_pos(&k->k),
	old = bch2_btree_iter_peek_slot(&iter);

	if (ca->mi.freespace_initialized &&
	    bch2_fs_inconsistent_on(old.k->type != old_type, c,
			"incorrect key when %s %s btree (got %s should be %s)\n"
			set ? "setting" : "clearing",
			bch2_btree_ids[btree],
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {

	ret = bch2_trans_update(trans, &iter, k, 0);
	bch2_trans_iter_exit(trans, &iter);
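/*
 * Transactional trigger, run on every alloc key update: bumps IO times and
 * the need-discard/need-inc-gen flags as buckets gain or lose data, and
 * keeps the freespace/need_discard indexes and the LRU in sync with the
 * bucket's new state:
 */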
int bch2_trans_mark_alloc(struct btree_trans *trans,
			  struct bkey_s_c old, struct bkey_i *new,
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a, *new_a;
	u64 old_lru, new_lru;

	/*
	 * Deletion only happens in the device removal path, with
	 * BTREE_TRIGGER_NORUN:
	 */
	BUG_ON(new->k.type != KEY_TYPE_alloc_v4);

	bch2_alloc_to_v4(old, &old_a);
	new_a = &bkey_i_to_alloc_v4(new)->v;

	if (new_a->dirty_sectors > old_a.dirty_sectors ||
	    new_a->cached_sectors > old_a.cached_sectors) {
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
		new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
		SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);

	if (old_a.data_type && !new_a->data_type &&
	    old_a.gen == new_a->gen &&
	    !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);

	if (bucket_state(old_a) != bucket_state(*new_a) ||
	    (bucket_state(*new_a) == BUCKET_free &&
	     alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
		ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
			bch2_bucket_do_index(trans, bkey_i_to_s_c(new), *new_a, true);

	old_lru = alloc_lru_idx(old_a);
	new_lru = alloc_lru_idx(*new_a);

	if (old_lru != new_lru) {
		ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,

		if (new_lru && new_a->io_time[READ] != new_lru)
			new_a->io_time[READ] = new_lru;
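/*
 * fsck: check that a bucket's alloc key agrees with the need_discard and
 * freespace btrees, rewriting the index entries if they're wrong:
 */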
static int bch2_check_alloc_key(struct btree_trans *trans,
				struct btree_iter *alloc_iter)
	struct bch_fs *c = trans->c;
	struct btree_iter discard_iter, freespace_iter;
	struct bch_alloc_v4 a;
	unsigned discard_key_type, freespace_key_type;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;

	alloc_k = bch2_btree_iter_peek(alloc_iter);

	ret = bkey_err(alloc_k);

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			"alloc key for invalid device or bucket"))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)

	bch2_alloc_to_v4(alloc_k, &a);

	discard_key_type = bucket_state(a) == BUCKET_need_discard
	freespace_key_type = bucket_state(a) == BUCKET_free

	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard,
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace,
			     alloc_freespace_pos(alloc_k.k->p, a), 0);

	k = bch2_btree_iter_peek_slot(&discard_iter);

	if (fsck_err_on(k.k->type != discard_key_type, c,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter.pos;

		ret = bch2_trans_update(trans, &discard_iter, update, 0);

	k = bch2_btree_iter_peek_slot(&freespace_iter);

	if (fsck_err_on(k.k->type != freespace_key_type, c,
			"incorrect key in freespace btree (got %s should be %s)\n"
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter.pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, &freespace_iter, update, 0);

	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	printbuf_exit(&buf2);
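/*
 * fsck, the reverse direction: check that every need_discard/freespace
 * entry refers to a bucket that's actually in the corresponding state,
 * deleting stale index entries:
 */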
static int bch2_check_discard_freespace_key(struct btree_trans *trans,
					    struct btree_iter *iter)
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c k, freespace_k;
	struct bch_alloc_v4 a;
	struct bkey_i *update;
	enum bucket_state state = iter->btree_id == BTREE_ID_need_discard
		? BUCKET_need_discard
	struct printbuf buf = PRINTBUF;

	freespace_k = bch2_btree_iter_peek(iter);

	ret = bkey_err(freespace_k);

	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			"%llu:%llu set in %s btree but device or bucket does not exist",
			pos.inode, pos.offset,
			bch2_btree_ids[iter->btree_id]))

	k = bch2_btree_iter_peek_slot(&alloc_iter);

	bch2_alloc_to_v4(k, &a);

	if (fsck_err_on(bucket_state(a) != state ||
			(state == BUCKET_free &&
			 genbits != alloc_freespace_genbits(a)), c,
			"%s\n incorrectly set in %s index (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
			bch2_btree_ids[iter->btree_id],
			bucket_state(a) == state,
			genbits >> 56, alloc_freespace_genbits(a) >> 56))

	bch2_trans_iter_exit(trans, &alloc_iter);

	if (iter->btree_id == BTREE_ID_freespace) {
		/* should probably add a helper for deleting extents */
		update = bch2_trans_kmalloc(trans, sizeof(*update));
		ret = PTR_ERR_OR_ZERO(update);

		bkey_init(&update->k);
		update->k.p = iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, iter, update, 0);
		ret = bch2_btree_delete_at(trans, iter, 0);
int bch2_check_alloc_info(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter iter;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_alloc_key(&trans, &iter));
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_PREFETCH);
		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_discard_freespace_key(&trans, &iter));

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_discard_freespace_key(&trans, &iter));

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
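/*
 * fsck: every bucket in the cached state should have an LRU entry keyed by
 * its read time; create or fix the entry, and the read time itself, when
 * they disagree:
 */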
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;

	alloc_k = bch2_btree_iter_peek(alloc_iter);

	ret = bkey_err(alloc_k);

	bch2_alloc_to_v4(alloc_k, &a);

	if (bucket_state(a) != BUCKET_cached)

	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     POS(alloc_k.k->p.inode, a.io_time[READ]), 0);

	k = bch2_btree_iter_peek_slot(&lru_iter);

	if (fsck_err_on(!a.io_time[READ], c,
			"cached bucket with read_time 0\n"
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
	    fsck_err_on(k.k->type != KEY_TYPE_lru ||
			le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
			"incorrect/missing lru entry\n"
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
		u64 read_time = a.io_time[READ];

		if (!a.io_time[READ])
			a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);

		ret = bch2_lru_change(trans,
				      0, &a.io_time[READ]);

		if (a.io_time[READ] != read_time) {
			struct bkey_i_alloc_v4 *a_mut =
				bch2_alloc_to_v4_mut(trans, alloc_k);
			ret = PTR_ERR_OR_ZERO(a_mut);

			a_mut->v.io_time[READ] = a.io_time[READ];
			ret = bch2_trans_update(trans, alloc_iter,
						&a_mut->k_i, BTREE_TRIGGER_NORUN);

	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf2);
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter iter;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_LAZY_RW,
			bch2_check_alloc_to_lru_ref(&trans, &iter));
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
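/*
 * Discard a single bucket: issue the block layer discard if the device
 * wants one, then clear NEED_DISCARD in the bucket's alloc key so it can
 * move to the freespace btree:
 */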
static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
				   struct bch_dev *ca, bool *discard_done)
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, pos,
	k = bch2_btree_iter_peek_slot(&iter);

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);

	if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
		SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);

	BUG_ON(a->v.journal_seq > c->journal.flushed_seq_ondisk);

	if (bch2_fs_inconsistent_on(!BCH_ALLOC_V4_NEED_DISCARD(&a->v), c,
			"%s\n incorrectly set in need_discard btree",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {

	if (!*discard_done && ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard btree:
		 */
		bch2_trans_unlock(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
		*discard_done = true;

		ret = bch2_trans_relock(trans) ? 0 : -EINTR;

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
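/*
 * Worker that walks the need_discard btree, skipping buckets that are
 * still open or whose alloc updates haven't yet been flushed from the
 * journal, and discarding the rest:
 */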
static void bch2_do_discards_work(struct work_struct *work)
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct bch_dev *ca = NULL;
	struct btree_trans trans;
	struct btree_iter iter;
	u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_need_discard,
			   POS_MIN, 0, k, ret) {
		bool discard_done = false;

		if (ca && k.k->p.inode != ca->dev_idx) {
			percpu_ref_put(&ca->io_ref);

		ca = bch_dev_bkey_exists(c, k.k->p.inode);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));

		if (bch2_bucket_is_open_safe(c, k.k->p.inode, k.k->p.offset)) {

		if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
				c->journal.flushed_seq_ondisk,
				k.k->p.inode, k.k->p.offset)) {
			need_journal_commit++;

		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_USE_RESERVE|
			bch2_clear_need_discard(&trans, k.k->p, ca, &discard_done));
	bch2_trans_iter_exit(&trans, &iter);

	if (ca)
		percpu_ref_put(&ca->io_ref);

	bch2_trans_exit(&trans);

	if (need_journal_commit * 2 > seen)
		bch2_journal_flush_async(&c->journal, NULL);

	percpu_ref_put(&c->writes);

	trace_do_discards(c, seen, open, need_journal_commit, discarded, ret);
void bch2_do_discards(struct bch_fs *c)
	if (percpu_ref_tryget(&c->writes) &&
	    !queue_work(system_long_wq, &c->discard_work))
		percpu_ref_put(&c->writes);
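/*
 * Pop the least recently used cached bucket off this device's LRU and
 * invalidate it, making the bucket available for reallocation:
 */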
static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter, alloc_iter = { NULL };
	struct bkey_i_alloc_v4 *a;

	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     POS(ca->dev_idx, 0), 0);
	k = bch2_btree_iter_peek(&lru_iter);

	if (!k.k || k.k->p.inode != ca->dev_idx)

	if (bch2_fs_inconsistent_on(k.k->type != KEY_TYPE_lru, c,
				    "non lru key in lru btree"))

	idx	= k.k->p.offset;
	bucket	= le64_to_cpu(bkey_s_c_to_lru(k).v->idx);

	a = bch2_trans_start_alloc_update(trans, &alloc_iter,
					  POS(ca->dev_idx, bucket));
	ret = PTR_ERR_OR_ZERO(a);

	if (bch2_fs_inconsistent_on(idx != alloc_lru_idx(a->v), c,
			"invalidating bucket with wrong lru idx (got %llu should be %llu)",
			idx, alloc_lru_idx(a->v)))

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.dirty_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE]	= atomic64_read(&c->io_clock[WRITE].now);

	ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
				BTREE_TRIGGER_BUCKET_INVALIDATE);

	bch2_trans_iter_exit(trans, &alloc_iter);
	bch2_trans_iter_exit(trans, &lru_iter);
static void bch2_do_invalidates_work(struct work_struct *work)
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct btree_trans trans;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_member_device(ca, c, i)
		while (!ret && should_invalidate_buckets(ca))
			ret = __bch2_trans_do(&trans, NULL, NULL,
					      BTREE_INSERT_USE_RESERVE|
					      BTREE_INSERT_NOFAIL,
				invalidate_one_bucket(&trans, ca));

	bch2_trans_exit(&trans);
	percpu_ref_put(&c->writes);

void bch2_do_invalidates(struct bch_fs *c)
	if (percpu_ref_tryget(&c->writes))
		queue_work(system_long_wq, &c->invalidate_work);
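/*
 * Build the freespace/need_discard indexes for one device from its alloc
 * keys, then persistently mark the member as initialized; run from device
 * add and from bch2_fs_freespace_init() at mount:
 */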
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
	struct btree_trans trans;
	struct btree_iter iter;
	struct bch_alloc_v4 a;
	struct bch_member *m;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc,
			   POS(ca->dev_idx, ca->mi.first_bucket),
			   BTREE_ITER_PREFETCH, k, ret) {
		if (iter.pos.offset >= ca->mi.nbuckets)

		bch2_alloc_to_v4(k, &a);
		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_LAZY_RW,
			bch2_bucket_do_index(&trans, k, a, true));
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(ca, "error initializing free space: %i", ret);

	mutex_lock(&c->sb_lock);
	m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);
int bch2_fs_freespace_init(struct bch_fs *c)
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * mount:
	 */
	for_each_member_device(ca, c, i) {
		if (ca->mi.freespace_initialized)

		bch_info(c, "initializing freespace");

		ret = bch2_dev_freespace_init(c, ca);
			percpu_ref_put(&ca->ref);

	mutex_lock(&c->sb_lock);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	bch_verbose(c, "done initializing freespace");
/* Bucket IO clocks: */
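/*
 * Update a bucket's read or write clock to the current IO clock time,
 * committing the change immediately:
 */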
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)

	a->v.io_time[rw] = now;

	ret   = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);

	bch2_trans_iter_exit(trans, &iter);
/* Startup/shutdown (ro/rw): */
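/*
 * Recompute usable capacity: sum the rw members' buckets, less per-device
 * reserves for btree nodes, copygc and internal write points, and less the
 * global GC reserve:
 */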
void bch2_recalc_capacity(struct bch_fs *c)
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
		spin_unlock(&ob->lock);
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	mutex_unlock(&c->btree_reserve_cache_lock);

		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in-flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);

void bch2_fs_allocator_background_init(struct bch_fs *c)
	spin_lock_init(&c->freelist_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);