// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

const char * const bch2_bucket_states[] = {
	"free",
	"need gc gens",
	"need discard",
	"cached",
	"dirty",
	NULL
};
/* Persistent alloc info: */

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}
static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
				      unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1:
		*((u8 *) *p) = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	case 8:
		*((__le64 *) *p) = cpu_to_le64(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}
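/*
 * Sketch of the v1 packed layout: a->fields is a bitmap of which fields are
 * present, and present fields are laid out back to back after the header,
 * each as a little endian integer of the per-field width from
 * BCH_ALLOC_FIELDS_V1().  Absent fields simply decode as 0, and zero-valued
 * fields are never written, so a mostly-empty bucket costs almost no space.
 */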
static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}
static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}
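/*
 * Unlike v1's bitmap scheme, v2/v3 keys store fields as a sequence of
 * varints: nr_fields says how many were written, any fields past that decode
 * as 0, and the "v != out->_name" check above catches a decoded value that
 * doesn't fit the in-memory struct member (e.g. a 64 bit varint landing in a
 * narrower field), which indicates a corrupt or incompatible key.
 */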
static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
			       const struct bkey_alloc_unpacked src)
{
	struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
	unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
	u8 *out = a->v.data;
	u8 *end = (void *) &dst[1];
	u8 *last_nonzero_field = out;
	unsigned bytes;

	a->k.p		= POS(src.dev, src.bucket);
	a->v.gen	= src.gen;
	a->v.oldest_gen	= src.oldest_gen;
	a->v.data_type	= src.data_type;
	a->v.journal_seq = cpu_to_le64(src.journal_seq);
	SET_BCH_ALLOC_NEED_DISCARD(&a->v, src.need_discard);
	SET_BCH_ALLOC_NEED_INC_GEN(&a->v, src.need_inc_gen);

#define x(_name, _bits)							\
	nr_fields++;							\
									\
	if (src._name) {						\
		out += bch2_varint_encode_fast(out, src._name);		\
									\
		last_nonzero_field = out;				\
		last_nonzero_fieldnr = nr_fields;			\
	} else {							\
		out += bch2_varint_encode_fast(out, 0);			\
	}

	BCH_ALLOC_FIELDS_V2()
#undef  x
	BUG_ON(out > end);

	out = last_nonzero_field;
	a->v.nr_fields = last_nonzero_fieldnr;

	bytes = (u8 *) out - (u8 *) &a->v;
	set_bkey_val_bytes(&a->k, bytes);
	memset_u64s_tail(&a->v, 0, bytes);
}
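/*
 * For example, packing a key where dirty_sectors is the last nonzero field
 * only emits varints up to dirty_sectors: nr_fields is trimmed back to the
 * last nonzero field, so an all-zero tail costs no space on disk and simply
 * unpacks as zeroes.  (Field order is fixed by BCH_ALLOC_FIELDS_V2().)
 */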
struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = {
		.dev	= k.k->p.inode,
		.bucket	= k.k->p.offset,
	};

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}
struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *trans,
				       const struct bkey_alloc_unpacked src)
{
	struct bkey_alloc_buf *dst;

	dst = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
	if (!IS_ERR(dst))
		bch2_alloc_pack_v3(dst, src);

	return dst;
}
int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
		     struct bkey_alloc_unpacked *u, unsigned trigger_flags)
{
	struct bkey_alloc_buf *a = bch2_alloc_pack(trans, *u);

	return PTR_ERR_OR_ZERO(a) ?:
		bch2_trans_update(trans, iter, &a->k, trigger_flags);
}
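/*
 * Typical usage from transaction context is a read-modify-write cycle,
 * roughly (error handling elided):
 *
 *	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
 *	u.read_time = atomic64_read(&c->io_clock[READ].now);
 *	ret = bch2_alloc_write(trans, &iter, &u, 0);
 *
 * as done in e.g. bch2_bucket_io_time_reset() below.
 */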
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
		return "incorrect value size";

	return NULL;
}

const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u;

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	if (bch2_alloc_unpack_v2(&u, k))
		return "unpack error";

	return NULL;
}

const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u;

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	if (bch2_alloc_unpack_v3(&u, k))
		return "unpack error";

	return NULL;
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

	pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu need_discard %u",
	       u.gen, u.oldest_gen, bch2_data_types[u.data_type],
	       u.journal_seq, u.need_discard);
#define x(_name, ...)	pr_buf(out, " " #_name " %llu", (u64) u._name);
	BCH_ALLOC_FIELDS_V2()
#undef  x
}
int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ca = bch_dev_bkey_exists(c, k.k->p.inode);

		*bucket_gen(ca, k.k->p.offset) = bch2_alloc_unpack(k).gen;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading alloc info: %i", ret);

	return ret;
}
/* Free space/discard btree: */

static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				struct bkey_alloc_unpacked a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, a.dev);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum bucket_state state = bucket_state(a);
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (state != BUCKET_free &&
	    state != BUCKET_need_discard)
		return 0;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (state) {
	case BUCKET_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(a);
		bch2_key_resize(&k->k, 1);
		break;
	case BUCKET_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = POS(a.dev, a.bucket);
		break;
	default:
		return 0;
	}

	bch2_trans_iter_init(trans, &iter, btree,
			     bkey_start_pos(&k->k),
			     BTREE_ITER_INTENT);
	old = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(old);
	if (ret)
		goto err;

	if (ca->mi.freespace_initialized &&
	    bch2_fs_inconsistent_on(old.k->type != old_type, c,
			"incorrect key when %s %s btree (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_ids[btree],
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
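/*
 * Freespace btree keys encode both the bucket and its generation: the
 * position's inode is the device index, the low 56 bits of the offset are the
 * bucket number, and the top bits hold genbits (see the masking in
 * bch2_check_freespace_key() below).  A stale freespace entry can thus be
 * detected by comparing its genbits against the current alloc key's.
 */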
int bch2_trans_mark_alloc(struct btree_trans *trans,
			  struct bkey_s_c old, struct bkey_i *new,
			  unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_alloc_unpacked old_u = bch2_alloc_unpack(old);
	struct bkey_alloc_unpacked new_u = bch2_alloc_unpack(bkey_i_to_s_c(new));
	u64 old_lru, new_lru;
	bool need_repack = false;
	int ret;

	if (new_u.dirty_sectors > old_u.dirty_sectors ||
	    new_u.cached_sectors > old_u.cached_sectors) {
		new_u.read_time = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
		new_u.write_time = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
		new_u.need_inc_gen = true;
		new_u.need_discard = true;
		need_repack = true;
	}

	if (old_u.data_type && !new_u.data_type &&
	    old_u.gen == new_u.gen &&
	    !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
		new_u.gen++;
		new_u.need_inc_gen = false;
		need_repack = true;
	}

	if (bucket_state(old_u) != bucket_state(new_u) ||
	    (bucket_state(new_u) == BUCKET_free &&
	     alloc_freespace_genbits(old_u) != alloc_freespace_genbits(new_u))) {
		ret =   bch2_bucket_do_index(trans, old, old_u, false) ?:
			bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_u, true);
		if (ret)
			return ret;
	}

	old_lru = alloc_lru_idx(old_u);
	new_lru = alloc_lru_idx(new_u);

	if (old_lru != new_lru) {
		ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
				      old_lru, &new_lru);
		if (ret)
			return ret;

		if (new_lru && new_u.read_time != new_lru) {
			new_u.read_time = new_lru;
			need_repack = true;
		}
	}

	if (need_repack && !bkey_deleted(&new->k))
		bch2_alloc_pack_v3((void *) new, new_u);

	return 0;
}
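/*
 * To summarize the transitions this trigger maintains: buckets gaining dirty
 * or cached data are stamped with the current io clock and flagged for
 * discard; a bucket going empty (same gen, not currently open) gets its gen
 * bumped immediately; entries move between the need_discard/freespace btrees
 * whenever bucket_state() changes; and the lru btree tracks read_time for
 * cached buckets.
 */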
static int bch2_check_alloc_key(struct btree_trans *trans,
				struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter discard_iter, freespace_iter, lru_iter;
	struct bkey_alloc_unpacked a;
	unsigned discard_key_type, freespace_key_type;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_unpack(alloc_k);
	discard_key_type = bucket_state(a) == BUCKET_need_discard
		? KEY_TYPE_set : 0;
	freespace_key_type = bucket_state(a) == BUCKET_free
		? KEY_TYPE_set : 0;

	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard,
			     alloc_k.k->p, 0);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace,
			     alloc_freespace_pos(a), 0);
	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     POS(a.dev, a.read_time), 0);

	k = bch2_btree_iter_peek_slot(&discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != discard_key_type, c,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter.pos;

		ret =   bch2_trans_update(trans, &discard_iter, update, 0) ?:
			bch2_trans_commit(trans, NULL, NULL, 0);
		if (ret)
			goto err;
	}

	k = bch2_btree_iter_peek_slot(&freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != freespace_key_type, c,
			"incorrect key in freespace btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter.pos;
		bch2_key_resize(&update->k, 1);

		ret =   bch2_trans_update(trans, &freespace_iter, update, 0) ?:
			bch2_trans_commit(trans, NULL, NULL, 0);
		if (ret)
			goto err;
	}

	if (bucket_state(a) == BUCKET_cached) {
		if (fsck_err_on(!a.read_time, c,
				"cached bucket with read_time 0\n"
				"  %s",
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
			a.read_time = atomic64_read(&c->io_clock[READ].now);

			ret =   bch2_lru_change(trans, a.dev, a.bucket,
						0, &a.read_time) ?:
				bch2_alloc_write(trans, alloc_iter, &a, BTREE_TRIGGER_NORUN) ?:
				bch2_trans_commit(trans, NULL, NULL, 0);
			if (ret)
				goto err;
		}

		k = bch2_btree_iter_peek_slot(&lru_iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (fsck_err_on(k.k->type != KEY_TYPE_lru ||
				le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != a.bucket, c,
				"incorrect/missing lru entry\n"
				"  %s\n"
				"  %s",
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
				(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
			u64 read_time = a.read_time;

			ret =   bch2_lru_change(trans, a.dev, a.bucket,
						0, &a.read_time) ?:
				(a.read_time != read_time
				 ? bch2_alloc_write(trans, alloc_iter, &a, BTREE_TRIGGER_NORUN)
				 : 0) ?:
				bch2_trans_commit(trans, NULL, NULL, 0);
			if (ret)
				goto err;
		}
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	printbuf_exit(&buf2);
	printbuf_exit(&buf);
	return ret;
}
static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
	struct bch_dev *ca;

	if (pos.inode >= c->sb.nr_devices ||
	    !c->devs[pos.inode])
		return false;

	ca = bch_dev_bkey_exists(c, pos.inode);
	return pos.offset >= ca->mi.first_bucket &&
		pos.offset < ca->mi.nbuckets;
}
static int bch2_check_freespace_key(struct btree_trans *trans,
				    struct btree_iter *freespace_iter,
				    bool initial)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c k, freespace_k;
	struct bkey_alloc_unpacked a;
	u64 genbits;
	struct bpos pos;
	struct bkey_i *update;
	struct printbuf buf = PRINTBUF;
	int ret;

	freespace_k = bch2_btree_iter_peek(freespace_iter);
	if (!freespace_k.k)
		return 1;

	ret = bkey_err(freespace_k);
	if (ret)
		return ret;

	pos = freespace_iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = freespace_iter->pos.offset & (~0ULL << 56);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			"%llu:%llu set in freespace btree but device or bucket does not exist",
			pos.inode, pos.offset))
		goto delete;

	k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	a = bch2_alloc_unpack(k);

	if (fsck_err_on(bucket_state(a) != BUCKET_free ||
			genbits != alloc_freespace_genbits(a), c,
			"%s\n  incorrectly set in freespace index (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
			bucket_state(a) == BUCKET_free,
			genbits >> 56, alloc_freespace_genbits(a) >> 56))
		goto delete;
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	update = bch2_trans_kmalloc(trans, sizeof(*update));
	ret = PTR_ERR_OR_ZERO(update);
	if (ret)
		goto err;

	bkey_init(&update->k);
	update->k.p = freespace_iter->pos;
	bch2_key_resize(&update->k, 1);

	ret =   bch2_trans_update(trans, freespace_iter, update, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
	goto out;
}
int bch2_check_alloc_info(struct bch_fs *c, bool initial)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0, last_dev = -1;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		if (k.k->p.inode != last_dev) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

			if (!ca->mi.freespace_initialized) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			last_dev = k.k->p.inode;
		}

		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_alloc_key(&trans, &iter));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	while (1) {
		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_freespace_key(&trans, &iter, initial));
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}
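/*
 * Note the check runs in both directions: bch2_check_alloc_key() walks alloc
 * keys and verifies the matching need_discard/freespace/lru entries, while
 * bch2_check_freespace_key() walks the freespace btree and verifies each
 * entry points at a genuinely free bucket with matching genbits.
 */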
static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
				   struct bch_dev *ca, bool *discard_done)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked a;
	struct printbuf buf = PRINTBUF;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_unpack(k);

	if (a.need_inc_gen) {
		a.gen++;
		a.need_inc_gen = false;
		goto write;
	}

	BUG_ON(a.journal_seq > c->journal.flushed_seq_ondisk);

	if (bch2_fs_inconsistent_on(!a.need_discard, c,
			"%s\n  incorrectly set in need_discard btree",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = -EIO;
		goto out;
	}

	if (!*discard_done && ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL, 0);
		*discard_done = true;

		ret = bch2_trans_relock(trans);
		if (ret)
			goto out;
	}

	a.need_discard = false;
write:
	ret = bch2_alloc_write(trans, &iter, &a, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct bch_dev *ca = NULL;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_need_discard,
			   POS_MIN, 0, k, ret) {
		bool discard_done = false;

		if (ca && k.k->p.inode != ca->dev_idx) {
			percpu_ref_put(&ca->io_ref);
			ca = NULL;
		}

		if (!ca) {
			ca = bch_dev_bkey_exists(c, k.k->p.inode);
			if (!percpu_ref_tryget(&ca->io_ref)) {
				ca = NULL;
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}
		}

		if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
				c->journal.flushed_seq_ondisk,
				k.k->p.inode, k.k->p.offset) ||
		    bch2_bucket_is_open_safe(c, k.k->p.inode, k.k->p.offset))
			continue;

		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_USE_RESERVE|
				      BTREE_INSERT_NOFAIL,
				bch2_clear_need_discard(&trans, k.k->p, ca, &discard_done));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (ca)
		percpu_ref_put(&ca->io_ref);

	bch2_trans_exit(&trans);
	percpu_ref_put(&c->writes);
}
void bch2_do_discards(struct bch_fs *c)
{
	if (percpu_ref_tryget(&c->writes) &&
	    !queue_work(system_long_wq, &c->discard_work))
		percpu_ref_put(&c->writes);
}
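/*
 * The discard path, end to end: the alloc trigger sets need_discard when a
 * bucket is written to, so once the bucket is later emptied it lands in the
 * need_discard btree; the work item above scans that btree, issues the block
 * layer discard, and clears the flag - at which point the same trigger moves
 * the bucket into the freespace btree, making it allocatable again.
 */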
static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter, alloc_iter = { NULL };
	struct bkey_s_c k;
	struct bkey_alloc_unpacked a;
	u64 bucket, idx;
	int ret;

	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     POS(ca->dev_idx, 0), 0);
	k = bch2_btree_iter_peek(&lru_iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	if (!k.k || k.k->p.inode != ca->dev_idx)
		goto out;

	if (bch2_fs_inconsistent_on(k.k->type != KEY_TYPE_lru, c,
				    "non lru key in lru btree"))
		goto out;

	idx	= k.k->p.offset;
	bucket	= le64_to_cpu(bkey_s_c_to_lru(k).v->idx);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, bucket),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_unpack(k);

	if (bch2_fs_inconsistent_on(idx != alloc_lru_idx(a), c,
			"invalidating bucket with wrong lru idx (got %llu should be %llu)",
			idx, alloc_lru_idx(a)))
		goto out;

	a.gen++;
	a.need_inc_gen	= false;
	a.data_type	= 0;
	a.dirty_sectors	= 0;
	a.cached_sectors = 0;
	a.read_time	= atomic64_read(&c->io_clock[READ].now);
	a.write_time	= atomic64_read(&c->io_clock[WRITE].now);

	ret = bch2_alloc_write(trans, &alloc_iter, &a,
			       BTREE_TRIGGER_BUCKET_INVALIDATE);
out:
	bch2_trans_iter_exit(trans, &alloc_iter);
	bch2_trans_iter_exit(trans, &lru_iter);
	return ret;
}
static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct bch_dev *ca;
	struct btree_trans trans;
	unsigned i;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_member_device(ca, c, i)
		while (!ret && should_invalidate_buckets(ca))
			ret = __bch2_trans_do(&trans, NULL, NULL,
					      BTREE_INSERT_USE_RESERVE|
					      BTREE_INSERT_NOFAIL,
					invalidate_one_bucket(&trans, ca));

	bch2_trans_exit(&trans);
	percpu_ref_put(&c->writes);
}
void bch2_do_invalidates(struct bch_fs *c)
{
	if (percpu_ref_tryget(&c->writes))
		queue_work(system_long_wq, &c->invalidate_work);
}
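/*
 * Invalidation reclaims buckets holding only cached data: while a device is
 * low on free buckets (should_invalidate_buckets()), the entry at the head of
 * its lru btree - the bucket with the oldest read_time - has its gen bumped
 * and its cached sectors dropped, turning it back into a free bucket.
 */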
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked a;
	struct bch_member *m;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc,
			   POS(ca->dev_idx, ca->mi.first_bucket),
			   BTREE_ITER_SLOTS|
			   BTREE_ITER_PREFETCH, k, ret) {
		if (iter.pos.offset >= ca->mi.nbuckets)
			break;

		a = bch2_alloc_unpack(k);
		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_LAZY_RW,
				bch2_bucket_do_index(&trans, k, a, true));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret) {
		bch_err(ca, "error initializing free space: %i", ret);
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}
int bch2_fs_freespace_init(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(ca, c, i) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);

		bch_verbose(c, "done initializing freespace");
	}

	return ret;
}
/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked u;
	u64 *time, now;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	u = bch2_alloc_unpack(k);

	time = rw == READ ? &u.read_time : &u.write_time;
	now = atomic64_read(&c->io_clock[rw].now);
	if (*time == now)
		goto out;

	*time = now;

	ret =   bch2_alloc_write(trans, &iter, &u, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
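/*
 * Resetting a bucket's io time keeps it from being reclaimed too eagerly:
 * since the lru index is derived from read_time, bumping it to the current io
 * clock moves the bucket toward the back of the invalidation queue.
 */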
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}
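/*
 * Worked example (illustrative numbers only): a device with 2^20 buckets
 * contributes roughly 2^14 buckets of copygc reserve (nbuckets >> 6) plus the
 * btree reserve and three write points, all scaled by bucket_size and
 * doubled.  If gc_reserve_percent is 8 and gc_reserve_bytes is unset, 800GB
 * of raw capacity yields a 64GB gc_reserve; the larger of the two reserves
 * (clamped to capacity) is what gets subtracted to form c->capacity.
 */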
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}