#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"

#include <trace/events/bcachefs.h>
static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k;

	for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
		struct bkey l = bkey_unpack_key(b, k);
		struct bkey r = bkey_unpack_key(b, bkey_next(k));

		BUG_ON(btree_node_is_extents(b)
		       ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
		       : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
		//BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
	}
#endif
}
static void clear_needs_whiteout(struct bset *i)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = false;
}

static void set_needs_whiteout(struct bset *i)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = true;
}
static void btree_bounce_free(struct bch_fs *c, unsigned order,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		vpfree(p, PAGE_SIZE << order);
}

static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
				bool *used_mempool)
{
	void *p;

	BUG_ON(order > btree_page_order(c));

	*used_mempool = false;
	p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
	if (p)
		return p;

	*used_mempool = true;
	return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
}
typedef int (*sort_cmp_fn)(struct btree *,
			   struct bkey_packed *,
			   struct bkey_packed *);

struct sort_iter {
	struct btree	*b;
	unsigned	used;

	struct sort_iter_set {
		struct bkey_packed *k, *end;
	} data[MAX_BSETS + 1];
};

static void sort_iter_init(struct sort_iter *iter, struct btree *b)
{
	memset(iter, 0, sizeof(*iter));
	iter->b = b;
}
static inline void __sort_iter_sift(struct sort_iter *iter,
				    unsigned from,
				    sort_cmp_fn cmp)
{
	unsigned i;

	for (i = from;
	     i + 1 < iter->used &&
	     cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
	     i++)
		swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
	__sort_iter_sift(iter, 0, cmp);
}

static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
	unsigned i = iter->used;

	while (i--)
		__sort_iter_sift(iter, i, cmp);
}
static void sort_iter_add(struct sort_iter *iter,
			  struct bkey_packed *k,
			  struct bkey_packed *end)
{
	BUG_ON(iter->used >= ARRAY_SIZE(iter->data));

	if (k != end)
		iter->data[iter->used++] = (struct sort_iter_set) { k, end };
}

static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
	return iter->used ? iter->data->k : NULL;
}

static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
	iter->data->k = bkey_next(iter->data->k);

	BUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		array_remove_item(iter->data, iter->used, 0);
	else
		sort_iter_sift(iter, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
						 sort_cmp_fn cmp)
{
	struct bkey_packed *ret = sort_iter_peek(iter);

	if (ret)
		sort_iter_advance(iter, cmp);

	return ret;
}
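/*
 * Note: a sort_iter is a small merge iterator: data[] holds one (k, end)
 * range per source bset (plus one for unwritten whiteouts), kept sorted by
 * each range's current front key, so data[0].k is always the smallest.
 * Typical usage, mirroring btree_node_sort() below (illustrative sketch
 * only):
 *
 *	sort_iter_init(&iter, b);
 *	for_each_bset(b, t)
 *		sort_iter_add(&iter,
 *			      btree_bkey_first(b, t),
 *			      btree_bkey_last(b, t));
 *	sort_iter_sort(&iter, cmp);
 *	while ((k = sort_iter_next(&iter, cmp)))
 *		...
 */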
static inline int sort_key_whiteouts_cmp(struct btree *b,
					 struct bkey_packed *l,
					 struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r);
}

static unsigned sort_key_whiteouts(struct bkey_packed *dst,
				   struct sort_iter *iter)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_key_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
static inline int sort_extent_whiteouts_cmp(struct btree *b,
					    struct bkey_packed *l,
					    struct bkey_packed *r)
{
	struct bkey ul = bkey_unpack_key(b, l);
	struct bkey ur = bkey_unpack_key(b, r);

	return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}

static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
				      struct sort_iter *iter)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *out = dst;
	struct bkey_i l, r;
	bool prev = false, l_packed = false;
	u64 max_packed_size	= bkey_field_max(f, BKEY_FIELD_SIZE);
	u64 max_packed_offset	= bkey_field_max(f, BKEY_FIELD_OFFSET);
	u64 new_size;

	max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

	sort_iter_sort(iter, sort_extent_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
		EBUG_ON(bkeyp_val_u64s(f, in));
		EBUG_ON(in->type != KEY_TYPE_DISCARD);

		r.k = bkey_unpack_key(iter->b, in);

		if (prev &&
		    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			new_size = l_packed
				? min(max_packed_size, max_packed_offset -
				      bkey_start_offset(&l.k))
				: KEY_SIZE_MAX;

			new_size = min(new_size, r.k.p.offset -
				       bkey_start_offset(&l.k));

			BUG_ON(new_size < l.k.size);

			bch2_key_resize(&l.k, new_size);

			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			bch2_cut_front(l.k.p, &r);
		}

		if (prev) {
			if (!bch2_bkey_pack(out, &l, f)) {
				BUG_ON(l_packed);
				bkey_copy(out, &l);
			}
			out = bkey_next(out);
		}

		l = r;
		prev = true;
		l_packed = bkey_packed(in);
	}

	if (prev) {
		if (!bch2_bkey_pack(out, &l, f)) {
			BUG_ON(l_packed);
			bkey_copy(out, &l);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
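/*
 * Note: the loop above coalesces the sorted extent whiteouts: while the
 * previous whiteout l overlaps or abuts the next one r, l is extended
 * (capped at what the packed format and KEY_SIZE_MAX can represent) and r
 * is trimmed with bch2_cut_front(), so whiteouts fully covered by their
 * predecessor are dropped entirely.
 */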
static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
				    bool compacting,
				    enum compact_mode mode)
{
	unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
	unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];

	if (mode == COMPACT_LAZY) {
		if (should_compact_bset_lazy(b, t) ||
		    (compacting && bset_unwritten(b, bset(b, t))))
			return dead_u64s;
	} else {
		if (bset_written(b, bset(b, t)))
			return dead_u64s;
	}

	return 0;
}
bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			      enum compact_mode mode)
{
	const struct bkey_format *f = &b->format;
	struct bset_tree *t;
	struct bkey_packed *whiteouts = NULL;
	struct bkey_packed *u_start, *u_pos;
	struct sort_iter sort_iter;
	unsigned order, whiteout_u64s = 0, u64s;
	bool used_mempool, compacting = false;

	for_each_bset(b, t)
		whiteout_u64s += should_compact_bset(b, t,
					whiteout_u64s != 0, mode);

	if (!whiteout_u64s)
		return false;

	sort_iter_init(&sort_iter, b);

	whiteout_u64s += b->whiteout_u64s;
	order = get_order(whiteout_u64s * sizeof(u64));

	whiteouts = btree_bounce_alloc(c, order, &used_mempool);
	u_start = u_pos = whiteouts;

	memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
		    b->whiteout_u64s);
	u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);

	sort_iter_add(&sort_iter, u_start, u_pos);

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && bset_unwritten(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (!should_compact_bset(b, t, compacting, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		compacting = true;
		u_start = u_pos;
		start = i->start;
		end = vstruct_last(i);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (bkey_deleted(k) && btree_node_is_extents(b))
				continue;

			if (bkey_whiteout(k) && !k->needs_whiteout)
				continue;

			if (bkey_whiteout(k)) {
				unreserve_whiteout(b, t, k);
				memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
				set_bkeyp_val_u64s(f, u_pos, 0);
				u_pos = bkey_next(u_pos);
			} else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
				bkey_copy(out, k);
				out = bkey_next(out);
			}
		}

		sort_iter_add(&sort_iter, u_start, u_pos);

		if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
			i->u64s = cpu_to_le16((u64 *) out - i->_data);
			set_btree_bset_end(b, t);
			bch2_bset_set_no_aux_tree(b, t);
		}
	}

	b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;

	BUG_ON((void *) unwritten_whiteouts_start(c, b) <
	       (void *) btree_bkey_last(b, bset_tree_last(b)));

	u64s = btree_node_is_extents(b)
		? sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
					&sort_iter)
		: sort_key_whiteouts(unwritten_whiteouts_start(c, b),
				     &sort_iter);

	BUG_ON(u64s > b->whiteout_u64s);
	BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
	BUG_ON(u_pos != whiteouts && !u64s);

	if (u64s != b->whiteout_u64s) {
		void *src = unwritten_whiteouts_start(c, b);

		b->whiteout_u64s = u64s;
		memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
	}

	verify_no_dups(b,
		       unwritten_whiteouts_start(c, b),
		       unwritten_whiteouts_end(c, b));

	btree_bounce_free(c, order, used_mempool, whiteouts);

	if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
		bch2_btree_build_aux_trees(b);

	bch_btree_keys_u64s_remaining(c, b);
	bch2_verify_btree_nr_keys(b);

	return true;
}
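/*
 * Summary of the compaction above: dead keys are dropped from every bset
 * worth compacting, whiteouts that still need to be written are copied out
 * to a bounce buffer, merge-sorted together with the existing unwritten
 * whiteouts, and the result is stashed back in the space between the end of
 * the last bset and the end of the node buffer
 * (unwritten_whiteouts_start() to unwritten_whiteouts_end()) for the next
 * write.
 */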
static bool bch2_drop_whiteouts(struct btree *b)
{
	struct bset_tree *t;
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;

		if (!should_compact_bset(b, t, true, COMPACT_WRITTEN))
			continue;

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (bset_unwritten(b, i) &&
		    t != b->set) {
			struct bset *dst =
			       max_t(struct bset *, write_block(b),
				     (void *) btree_bkey_last(b, t - 1));

			memmove(dst, i, sizeof(struct bset));
			i = dst;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (!bkey_whiteout(k)) {
				bkey_copy(out, k);
				out = bkey_next(out);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	return ret;
}
static inline int sort_keys_cmp(struct btree *b,
				struct bkey_packed *l,
				struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
		(int) l->needs_whiteout - (int) r->needs_whiteout;
}

static unsigned sort_keys(struct bkey_packed *dst,
			  struct sort_iter *iter,
			  bool filter_whiteouts)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *next, *out = dst;

	sort_iter_sort(iter, sort_keys_cmp);

	while ((in = sort_iter_next(iter, sort_keys_cmp))) {
		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		if (bkey_whiteout(in) &&
		    (next = sort_iter_peek(iter)) &&
		    !bkey_cmp_packed(iter->b, in, next)) {
			BUG_ON(in->needs_whiteout &&
			       next->needs_whiteout);
			/*
			 * XXX racy, called with read lock from write path
			 *
			 * leads to spurious BUG_ON() in bkey_unpack_key() in
			 * debug mode
			 */
			next->needs_whiteout |= in->needs_whiteout;
			continue;
		}

		if (bkey_whiteout(in)) {
			memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
			set_bkeyp_val_u64s(f, out, 0);
		} else {
			bkey_copy(out, in);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
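/*
 * Note on ordering: sort_keys_cmp() sorts whiteouts before live keys at the
 * same position, so when sort_keys() sees a whiteout followed by an equal
 * key it can transfer needs_whiteout to the survivor and drop the whiteout,
 * emitting at most one key per position.
 */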
static inline int sort_extents_cmp(struct btree *b,
				   struct bkey_packed *l,
				   struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(l) - (int) bkey_deleted(r);
}

static unsigned sort_extents(struct bkey_packed *dst,
			     struct sort_iter *iter,
			     bool filter_whiteouts)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_extents_cmp);

	while ((in = sort_iter_next(iter, sort_extents_cmp))) {
		if (bkey_deleted(in))
			continue;

		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    struct btree_iter *iter,
			    unsigned start_idx,
			    unsigned end_idx,
			    bool filter_whiteouts)
{
	struct btree_node *out;
	struct sort_iter sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	order = sorting_entire_node
		? btree_page_order(c)
		: get_order(__vstruct_bytes(struct btree_node, u64s));

	out = btree_bounce_alloc(c, order, &used_mempool);

	start_time = local_clock();

	if (btree_node_is_extents(b))
		filter_whiteouts = bset_written(b, start_bset);

	u64s = btree_node_is_extents(b)
		? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
		: sort_keys(out->keys.start, &sort_iter, filter_whiteouts);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));

	if (sorting_entire_node)
		bch2_time_stats_update(&c->btree_sort_time, start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		unsigned u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(order != btree_page_order(c));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, order, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}
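/*
 * Note: when sorting the entire node the bounce buffer is a full-size node
 * buffer, so the result is installed by swapping buffers
 * (swap(out, b->data)) instead of copying keys back; for a partial sort the
 * result is memcpy_u64s()'d into the first bset being replaced and the
 * remaining bset_trees are shifted down by 'shift'.
 */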
/* Sort + repack in a new format: */
static struct btree_nr_keys sort_repack(struct bset *dst,
					struct btree *src,
					struct btree_node_iter *src_iter,
					struct bkey_format *out_f,
					bool filter_whiteouts)
{
	struct bkey_format *in_f = &src->format;
	struct bkey_packed *in, *out = vstruct_last(dst);
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
		if (filter_whiteouts && bkey_whiteout(in))
			continue;

		if (bch2_bkey_transform(out_f, out, bkey_packed(in)
				       ? in_f : &bch2_bkey_format_current, in))
			out->format = KEY_FORMAT_LOCAL_BTREE;
		else
			bch2_bkey_unpack(src, (void *) out, in);

		btree_keys_account_key_add(&nr, 0, out);
		out = bkey_next(out);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}
/* Sort, repack, and merge: */
static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
					      struct bset *dst,
					      struct btree *src,
					      struct btree_node_iter *iter,
					      struct bkey_format *out_f,
					      bool filter_whiteouts,
					      key_filter_fn filter,
					      key_merge_fn merge)
{
	struct bkey_packed *k, *prev = NULL, *out;
	struct btree_nr_keys nr;
	BKEY_PADDED(k) tmp;

	memset(&nr, 0, sizeof(nr));

	while ((k = bch2_btree_node_iter_next_all(iter, src))) {
		if (filter_whiteouts && bkey_whiteout(k))
			continue;

		/*
		 * The filter might modify pointers, so we have to unpack the
		 * key and values to &tmp.k:
		 */
		bch2_bkey_unpack(src, &tmp.k, k);

		if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
			continue;

		/* prev is always unpacked, for key merging: */

		if (prev &&
		    merge &&
		    merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
			continue;

		/*
		 * the current key becomes the new prev: advance prev, then
		 * copy the current key - but first pack prev (in place):
		 */
		if (prev) {
			bch2_bkey_pack(prev, (void *) prev, out_f);

			btree_keys_account_key_add(&nr, 0, prev);
			prev = bkey_next(prev);
		} else {
			prev = vstruct_last(dst);
		}

		bkey_copy(prev, &tmp.k);
	}

	if (prev) {
		bch2_bkey_pack(prev, (void *) prev, out_f);
		btree_keys_account_key_add(&nr, 0, prev);
		out = bkey_next(prev);
	} else {
		out = vstruct_last(dst);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}
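/*
 * Note: sort_repack_merge() keeps 'prev' unpacked while it sits at the end
 * of the destination, so the merge callback (which operates on struct
 * bkey_i) can keep extending it with following keys; prev is only packed
 * into the destination format once nothing more can be merged into it.
 */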
void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src,
					     btree_node_is_extents(src));

	if (btree_node_ops(src)->key_normalize ||
	    btree_node_ops(src)->key_merge)
		nr = sort_repack_merge(c, btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true,
				btree_node_ops(src)->key_normalize,
				btree_node_ops(src)->key_merge);
	else
		nr = sort_repack(btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true);

	bch2_time_stats_update(&c->btree_sort_time, start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}
#define SORT_CRIT	(4096 / sizeof(u64))

/*
 * We're about to add another bset to the btree node, so if there are currently
 * too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b,
			       struct btree_iter *iter)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, iter, unwritten_idx,
				b->nsets, false);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, iter, 0, unwritten_idx, false);
		ret = true;
	}

	return ret;
}
void bch2_btree_build_aux_trees(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				bset_unwritten(b, bset(b, t)) &&
				t == bset_tree_last(b));
}
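/*
 * Note: only the last bset, and only while it's still unwritten, gets the
 * read-write (appendable) aux search tree; every other bset gets the
 * compact read-only one, since it can no longer be appended to.
 */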
/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If we sorted (i.e. invalidated iterators), @iter is revalidated with
 * bch2_btree_iter_reinit_node().
 */
void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
			  struct btree_iter *iter)
{
	struct btree_node_entry *bne;
	bool did_sort;

	EBUG_ON(!(b->lock.state.seq & 1));
	EBUG_ON(iter && iter->l[b->level].b != b);

	did_sort = btree_node_compact(c, b, iter);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, &bne->keys);

	bch2_btree_build_aux_trees(b);

	if (iter && did_sort)
		bch2_btree_iter_reinit_node(iter, b);
}
static struct nonce btree_nonce(struct bset *i, unsigned offset)
{
	return (struct nonce) {{
		[0] = cpu_to_le32(offset),
		[1] = ((__le32 *) &i->seq)[0],
		[2] = ((__le32 *) &i->seq)[1],
		[3] = ((__le32 *) &i->journal_seq)[0] ^ BCH_NONCE_BTREE,
	}};
}

static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
{
	struct nonce nonce = btree_nonce(i, offset);

	if (!offset) {
		struct btree_node *bn = container_of(i, struct btree_node, keys);
		unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;

		bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
			     bytes);

		nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
	}

	bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
		     vstruct_end(i) - (void *) i->_data);
}
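/*
 * Nonce layout sketch: four 32 bit words, all derived from fields stored in
 * the bset itself so the same nonce can be rebuilt at read time -
 *
 *	[0]	byte offset of this bset within the node
 *	[1]	bset->seq, low 32 bits
 *	[2]	bset->seq, high 32 bits
 *	[3]	bset->journal_seq, low 32 bits, xored with BCH_NONCE_BTREE
 *
 * For the first bset (offset 0), the in-node header from bn->flags up to
 * bn->keys is encrypted as well; the csum and magic fields stay in the
 * clear so the node can still be identified and verified.
 */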
static int btree_err_msg(struct bch_fs *c, struct btree *b, struct bset *i,
			 unsigned offset, int write, char *buf, size_t len)
{
	char *out = buf, *end = buf + len;

	out += scnprintf(out, end - out,
			 "error validating btree node %s"
			 "at btree %u level %u/%u\n"
			 "pos %llu:%llu node offset %u",
			 write ? "before write " : "",
			 b->btree_id, b->level,
			 c->btree_roots[b->btree_id].level,
			 b->key.k.p.inode, b->key.k.p.offset,
			 offset);
	if (i)
		out += scnprintf(out, end - out,
				 " bset u64s %u",
				 le16_to_cpu(i->u64s));

	return out - buf;
}

enum btree_err_type {
	BTREE_ERR_FIXABLE,
	BTREE_ERR_WANT_RETRY,
	BTREE_ERR_MUST_RETRY,
	BTREE_ERR_FATAL,
};

enum btree_validate_ret {
	BTREE_RETRY_READ = 64,
};
#define btree_err(type, c, b, i, msg, ...)				\
({									\
	char buf[200], *out = buf, *end = out + sizeof(buf);		\
									\
	out += btree_err_msg(c, b, i, b->written, write, out, end - out);\
	out += scnprintf(out, end - out, ": " msg, ##__VA_ARGS__);	\
									\
	if (type == BTREE_ERR_FIXABLE &&				\
	    write == READ &&						\
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {		\
		mustfix_fsck_err(c, "%s", buf);				\
	} else {							\
		bch_err(c, "%s", buf);					\
									\
		switch (type) {						\
		case BTREE_ERR_FIXABLE:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		case BTREE_ERR_WANT_RETRY:				\
			if (have_retry) {				\
				ret = BTREE_RETRY_READ;			\
				goto fsck_err;				\
			}						\
			break;						\
		case BTREE_ERR_MUST_RETRY:				\
			ret = BTREE_RETRY_READ;				\
			goto fsck_err;					\
		case BTREE_ERR_FATAL:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
	}								\
	true;								\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
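/*
 * Note: btree_err()/btree_err_on() expect 'write', 'have_retry' and 'ret'
 * to be in scope at the call site, plus an fsck_err label to jump to:
 * fixable errors hit before initial GC completes are repaired via
 * mustfix_fsck_err(), anything else is logged and mapped to a retry or a
 * fatal fsck error.
 */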
static int validate_bset(struct bch_fs *c, struct btree *b,
			 struct bset *i, unsigned sectors,
			 unsigned *whiteout_u64s, int write,
			 bool have_retry)
{
	struct bkey_packed *k, *prev = NULL;
	struct bpos prev_pos = POS_MIN;
	enum bkey_type type = btree_node_type(b);
	bool seen_non_whiteout = false;
	const char *err;
	int ret = 0;

	if (i == &b->data->keys) {
		/* These indicate that we read the wrong btree node: */
		btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect level");

		if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
			u64 *p = (u64 *) &b->data->ptr;

			*p = swab64(*p);
			bch2_bpos_swab(&b->data->min_key);
			bch2_bpos_swab(&b->data->max_key);
		}

		btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect max key");

		/* XXX: ideally we would be validating min_key too */
#if 0
		/*
		 * not correct anymore, due to btree node write error
		 * handling
		 *
		 * need to add b->data->seq to btree keys and verify
		 * against that
		 */
		btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
						  b->data->ptr),
			     BTREE_ERR_FATAL, c, b, i,
			     "incorrect backpointer");
#endif
		err = bch2_bkey_format_validate(&b->data->format);
		btree_err_on(err,
			     BTREE_ERR_FATAL, c, b, i,
			     "invalid bkey format: %s", err);
	}

	if (btree_err_on(le16_to_cpu(i->version) != BCACHE_BSET_VERSION,
			 BTREE_ERR_FIXABLE, c, b, i,
			 "unsupported bset version")) {
		i->version = cpu_to_le16(BCACHE_BSET_VERSION);
		i->u64s = 0;
		return 0;
	}

	if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
			 BTREE_ERR_FIXABLE, c, b, i,
			 "bset past end of btree node")) {
		i->u64s = 0;
		return 0;
	}

	btree_err_on(b->written && !i->u64s,
		     BTREE_ERR_FIXABLE, c, b, i,
		     "empty bset");

	if (!BSET_SEPARATE_WHITEOUTS(i)) {
		seen_non_whiteout = true;
		*whiteout_u64s = 0;
	}

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s_c u;
		struct bkey tmp;
		const char *invalid;

		if (btree_err_on(!k->u64s,
				 BTREE_ERR_FIXABLE, c, b, i,
				 "KEY_U64s 0: %zu bytes of metadata lost",
				 vstruct_end(i) - (void *) k)) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(bkey_next(k) > vstruct_last(i),
				 BTREE_ERR_FIXABLE, c, b, i,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 BTREE_ERR_FIXABLE, c, b, i,
				 "invalid bkey format %u", k->format)) {
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
			bch2_bkey_swab(type, &b->format, k);

		u = bkey_disassemble(b, k, &tmp);

		invalid = __bch2_bkey_invalid(c, type, u) ?:
			bch2_bkey_in_btree_node(b, u) ?:
			(write ? bch2_bkey_val_invalid(c, type, u) : NULL);
		if (invalid) {
			char buf[160];

			bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
			btree_err(BTREE_ERR_FIXABLE, c, b, i,
				  "invalid bkey %s: %s", buf, invalid);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		/*
		 * with the separate whiteouts thing (used for extents), the
		 * second set of keys actually can have whiteouts too, so we
		 * can't solely go off bkey_whiteout()...
		 */

		if (!seen_non_whiteout &&
		    (!bkey_whiteout(k) ||
		     (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
			*whiteout_u64s = k->_data - i->_data;
			seen_non_whiteout = true;
		} else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
			btree_err(BTREE_ERR_FATAL, c, b, i,
				  "keys out of order: %llu:%llu > %llu:%llu",
				  prev_pos.inode,
				  prev_pos.offset,
				  u.k->p.inode,
				  bkey_start_offset(u.k));
			/* XXX: repair this */
		}

		prev_pos = u.k->p;
		prev = k;
		k = bkey_next(k);
	}

	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
fsck_err:
	return ret;
}
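/*
 * Note: on return *whiteout_u64s is the size of the run of extent whiteouts
 * at the start of the bset (written separately when
 * BSET_SEPARATE_WHITEOUTS is set), so the read path can push the whiteouts
 * and the live keys onto the node iterator as two distinct ranges.
 */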
int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
{
	struct btree_node_entry *bne;
	struct btree_node_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool;
	unsigned u64s;
	int ret, retry_read = 0, write = READ;

	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
	__bch2_btree_node_iter_init(iter, btree_node_is_extents(b));

	if (bch2_meta_read_fault("btree"))
		btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     BTREE_ERR_MUST_RETRY, c, b, NULL,
		     "bad magic");

	btree_err_on(!b->data->keys.seq,
		     BTREE_ERR_MUST_RETRY, c, b, NULL,
		     "bad btree header");

	while (b->written < c->opts.btree_node_size) {
		unsigned sectors, whiteout_u64s = 0;
		struct nonce nonce;
		struct bch_csum csum;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "unknown checksum type");

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);

			btree_err_on(bch2_crc_cmp(csum, b->data->csum),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			sectors = vstruct_sectors(b->data, c->block_bits);

			set_btree_bset(b, b->set, &b->data->keys);
			btree_node_set_format(b, b->data->format);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "unknown checksum type");

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			btree_err_on(bch2_crc_cmp(csum, bne->csum),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
				    READ, have_retry);
		if (ret)
			goto fsck_err;

		b->written += sectors;

		ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
		if (ret < 0)
			btree_err(BTREE_ERR_FATAL, c, b, i,
				  "insufficient memory");

		if (ret) {
			btree_err_on(!b->written,
				     BTREE_ERR_FIXABLE, c, b, i,
				     "first btree node bset has blacklisted journal seq");
			if (b->written)
				continue;
		}

		__bch2_btree_node_iter_push(iter, b,
					    i->start,
					    vstruct_idx(i, whiteout_u64s));

		__bch2_btree_node_iter_push(iter, b,
					    vstruct_idx(i, whiteout_u64s),
					    vstruct_last(i));
	}

	for (bne = write_block(b);
	     bset_byte_offset(b, bne) < btree_bytes(c);
	     bne = (void *) bne + block_bytes(c))
		btree_err_on(bne->keys.seq == b->data->keys.seq,
			     BTREE_ERR_WANT_RETRY, c, b, NULL,
			     "found bset signature after last bset");

	sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
	sorted->keys.u64s = 0;

	b->nr = btree_node_is_extents(b)
		? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
		: bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		enum bkey_type type = btree_node_type(b);
		struct bkey tmp;
		struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
		const char *invalid = bch2_bkey_val_invalid(c, type, u);

		if (invalid) {
			char buf[160];

			bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
			btree_err(BTREE_ERR_FIXABLE, c, b, i,
				  "invalid bkey %s: %s", buf, invalid);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		k = bkey_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b));

	btree_node_reset_sib_u64s(b);
out:
	mempool_free(iter, &c->fill_iter);
	return retry_read;
fsck_err:
	if (ret == BTREE_RETRY_READ) {
		retry_read = 1;
	} else {
		bch2_inconsistent_error(c);
		set_btree_node_read_error(b);
	}
	goto out;
}
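/*
 * Read path summary: each bset in the node is checksummed, decrypted and
 * validated in order and pushed onto a node iterator (whiteouts and live
 * keys as separate ranges); everything is then merge-sorted into a bounce
 * buffer, fixing up overlapping extents, the buffers are swapped, and
 * finally the key values - which can't be fully validated until extents
 * have been sorted - are checked and bad keys dropped.
 */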
static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct btree *b		= rb->bio.bi_private;
	struct bio *bio		= &rb->bio;
	struct bch_devs_mask avoid;

	memset(&avoid, 0, sizeof(avoid));

	goto start;
	do {
		bch_info(c, "retrying read");
		bio_reset(bio);
		bio_set_dev(bio, rb->pick.ca->disk_sb.bdev);
		bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_bytes(c);
		submit_bio_wait(bio);
start:
		bch2_dev_io_err_on(bio->bi_status, rb->pick.ca, "btree read");
		percpu_ref_put(&rb->pick.ca->io_ref);

		__set_bit(rb->pick.ca->dev_idx, avoid.d);
		rb->pick = bch2_btree_pick_ptr(c, b, &avoid);

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, b, !IS_ERR_OR_NULL(rb->pick.ca)))
			goto out;
	} while (!IS_ERR_OR_NULL(rb->pick.ca));

	set_btree_node_read_error(b);
out:
	if (!IS_ERR_OR_NULL(rb->pick.ca))
		percpu_ref_put(&rb->pick.ca->io_ref);

	bch2_time_stats_update(&c->btree_read_time, rb->start_time);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);

	bch2_latency_acct(rb->pick.ca, rb->start_time >> 10, READ);

	INIT_WORK(&rb->work, btree_node_read_work);
	queue_work(system_unbound_wq, &rb->work);
}
void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
			  bool sync)
{
	struct extent_pick_ptr pick;
	struct btree_read_bio *rb;
	struct bio *bio;

	trace_btree_read(c, b);

	pick = bch2_btree_pick_ptr(c, b, NULL);
	if (bch2_fs_fatal_err_on(!pick.ca, c,
			"btree node read error: no device to read from")) {
		set_btree_node_read_error(b);
		return;
	}

	bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->start_time		= local_clock();
	rb->pick		= pick;
	bio_set_dev(bio, pick.ca->disk_sb.bdev);
	bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_iter.bi_size	= btree_bytes(c);
	bch2_bio_map(bio, b->data);

	this_cpu_add(pick.ca->io_done->sectors[READ][BCH_DATA_BTREE],
		     bio_sectors(bio));

	set_btree_node_read_in_flight(b);

	if (sync) {
		submit_bio_wait(bio);
		bio->bi_private	= b;
		btree_node_read_work(&rb->work);
	} else {
		bio->bi_end_io	= btree_node_read_endio;
		bio->bi_private	= b;
		submit_bio(bio);
	}
}
int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(c);
	bch2_btree_cache_cannibalize_unlock(c);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	bch2_btree_node_read(c, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -EIO;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->lock);
	six_unlock_intent(&b->lock);

	return ret;
}
void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
			       struct btree_write *w)
{
	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);

	do {
		old = new = v;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
	closure_wake_up(&w->wait);
}

static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);

	bch2_btree_complete_write(c, b, w);
	btree_node_io_unlock(b);
}
static void bch2_btree_node_write_error(struct bch_fs *c,
					struct btree_write_bio *wbio)
{
	struct btree *b		= wbio->wbio.bio.bi_private;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
	struct bkey_i_extent *new_key;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct btree_iter iter;
	int ret;

	__bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
			       BTREE_MAX_DEPTH, b->level, 0);
retry:
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto err;

	/* has node been freed? */
	if (iter.l[b->level].b != b) {
		/* node has been freed: */
		BUG_ON(!btree_node_dying(b));
		goto out;
	}

	BUG_ON(!btree_node_hashed(b));

	bkey_copy(&tmp.k, &b->key);

	new_key = bkey_i_to_extent(&tmp.k);
	e = extent_i_to_s(new_key);
	extent_for_each_ptr_backwards(e, ptr)
		if (bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev))
			bch2_extent_drop_ptr(e, ptr);

	if (!bch2_extent_nr_ptrs(e.c))
		goto err;

	ret = bch2_btree_node_update_key(c, &iter, b, new_key);
	if (ret == -EINTR)
		goto retry;
	if (ret)
		goto err;
out:
	bch2_btree_iter_unlock(&iter);
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_error(c, "fatal error writing btree node");
	goto out;
}
void bch2_btree_write_error_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					btree_write_error_work);
	struct bio *bio;

	while (1) {
		spin_lock_irq(&c->btree_write_error_lock);
		bio = bio_list_pop(&c->btree_write_error_list);
		spin_unlock_irq(&c->btree_write_error_lock);

		if (!bio)
			break;

		bch2_btree_node_write_error(c,
			container_of(bio, struct btree_write_bio, wbio.bio));
	}
}
static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c	= wbio->wbio.c;
	struct btree *b		= wbio->wbio.bio.bi_private;

	btree_bounce_free(c,
		wbio->wbio.order,
		wbio->wbio.used_mempool,
		wbio->data);

	if (wbio->wbio.failed.nr) {
		unsigned long flags;

		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);

		queue_work(c->wq, &c->btree_write_error_work);
		return;
	}

	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
}
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig	= parent ?: wbio;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= wbio->ca;
	unsigned long flags;

	bch2_latency_acct(ca, wbio->submit_time_us, WRITE);

	if (bio->bi_status == BLK_STS_REMOVED ||
	    bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, ca->dev_idx);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_io_ref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
	} else {
		struct btree_write_bio *wb =
			container_of(orig, struct btree_write_bio, wbio);

		INIT_WORK(&wb->work, btree_node_write_work);
		queue_work(system_unbound_wq, &wb->work);
	}
}
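/*
 * Note: btree node writes may be replicated and thus split across devices
 * (see bch2_submit_wbio_replicas() below); each split completion records
 * failed devices in the original wbio and completes its parent, and only
 * the final, non-split completion punts to btree_node_write_work() to
 * either finish the write or queue error handling.
 */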
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	const struct bch_extent_ptr *ptr;
	unsigned whiteout_u64s = 0;
	int ret;

	extent_for_each_ptr(bkey_i_to_s_c_extent(&b->key), ptr)
		break;

	ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
	if (ret)
		bch2_inconsistent_error(c);

	return ret;
}
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			     enum six_lock_type lock_type_held)
{
	struct btree_write_bio *wbio;
	struct bset_tree *t;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	BKEY_PADDED(key) k;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct sort_iter sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	void *data;

	if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
		return;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	do {
		old = new = READ_ONCE(b->flags);

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if (b->written &&
		    !btree_node_may_write(b))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight)) {
			btree_node_wait_on_io(b);
			continue;
		}

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	BUG_ON(btree_node_fake(b));
	BUG_ON(!list_empty(&b->write_blocked));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= c->opts.btree_node_size);
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	/*
	 * We can't block on six_lock_write() here; another thread might be
	 * trying to get a journal reservation with read locks held, and getting
	 * a journal reservation might be blocked on flushing the journal and
	 * doing btree writes:
	 */
	if (lock_type_held == SIX_LOCK_intent &&
	    six_trylock_write(&b->lock)) {
		__bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
		six_unlock_write(&b->lock);
	} else {
		__bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
	}

	BUG_ON(b->uncompacted_whiteout_u64s);

	sort_iter_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	order = get_order(bytes);
	data = btree_bounce_alloc(c, order, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq	= cpu_to_le64(seq);
	i->u64s		= 0;

	if (!btree_node_is_extents(b)) {
		sort_iter_add(&sort_iter,
			      unwritten_whiteouts_start(c, b),
			      unwritten_whiteouts_end(c, b));
		SET_BSET_SEPARATE_WHITEOUTS(i, false);
	} else {
		memcpy_u64s(i->start,
			    unwritten_whiteouts_start(c, b),
			    b->whiteout_u64s);
		i->u64s = cpu_to_le16(b->whiteout_u64s);
		SET_BSET_SEPARATE_WHITEOUTS(i, true);
	}

	b->whiteout_u64s = 0;

	u64s = btree_node_is_extents(b)
		? sort_extents(vstruct_last(i), &sort_iter, false)
		: sort_keys(i->start, &sort_iter, false);
	le16_add_cpu(&i->u64s, u64s);

	clear_needs_whiteout(i);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(BCACHE_BSET_VERSION);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	/* if we're going to be encrypting, check metadata validity first: */
	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	bset_encrypt(c, i, b->written << 9);

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_btree_write(b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(GFP_NOIO, 1 << order, &c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data			= data;
	wbio->wbio.order		= order;
	wbio->wbio.used_mempool		= used_mempool;
	wbio->wbio.bio.bi_opf		= REQ_OP_WRITE|REQ_META|REQ_FUA;
	wbio->wbio.bio.bi_iter.bi_size	= sectors_to_write << 9;
	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
	wbio->wbio.bio.bi_private	= b;

	bch2_bio_map(&wbio->wbio.bio, data);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	e = bkey_i_to_s_extent(&k.key);

	extent_for_each_ptr(e, ptr)
		ptr->offset += b->written;

	b->written += sectors_to_write;

	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, order, used_mempool, data);
	btree_node_write_done(c, b);
}
/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;
	struct bset_tree *t;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);
	BUG_ON(b->uncompacted_whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_unwritten()/bset_written() don't
	 * work - the amount of data we had to write after compaction might have
	 * been smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, NULL, 0, b->nsets, true);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t));

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, &bne->keys);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}
/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held)
{
	BUG_ON(lock_type_held == SIX_LOCK_write);

	if (lock_type_held == SIX_LOCK_intent ||
	    six_lock_tryupgrade(&b->lock)) {
		__bch2_btree_node_write(c, b, SIX_LOCK_intent);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->lock);
	} else {
		__bch2_btree_node_write(c, b, SIX_LOCK_read);
	}
}
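/*
 * Note: post-write cleanup needs the write lock, so with an intent lock
 * held we try to upgrade and run it inline; with only a read lock the write
 * still happens (the COMPACT_WRITTEN_NO_WRITE_LOCK path in
 * __bch2_btree_node_write()), but cleanup is deferred until some caller
 * next takes the write lock.
 */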
static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			goto restart;
		}
	rcu_read_unlock();
}

void bch2_btree_flush_all_reads(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

void bch2_btree_flush_all_writes(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
void bch2_btree_verify_flushed(struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos) {
		unsigned long flags = READ_ONCE(b->flags);

		BUG_ON((flags & (1 << BTREE_NODE_dirty)) ||
		       (flags & (1 << BTREE_NODE_write_in_flight)));
	}
	rcu_read_unlock();
}
ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
{
	char *out = buf, *end = buf + PAGE_SIZE;
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos) {
		unsigned long flags = READ_ONCE(b->flags);
		unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;

		if (//!(flags & (1 << BTREE_NODE_dirty)) &&
		    !b->writes[0].wait.list.first &&
		    !b->writes[1].wait.list.first &&
		    !(b->will_make_reachable & 1))
			continue;

		out += scnprintf(out, end - out, "%p d %u l %u w %u b %u r %u:%lu c %u p %u\n",
				 b,
				 (flags & (1 << BTREE_NODE_dirty)) != 0,
				 b->level,
				 b->written,
				 !list_empty_careful(&b->write_blocked),
				 b->will_make_reachable != 0,
				 b->will_make_reachable & 1,
				 b->writes[ idx].wait.list.first != NULL,
				 b->writes[!idx].wait.list.first != NULL);
	}
	rcu_read_unlock();

	return out - buf;
}