3 #include "bkey_methods.h"
4 #include "btree_cache.h"
6 #include "btree_iter.h"
7 #include "btree_locking.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
19 #include <trace/events/bcachefs.h>
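/*
 * Debug helper: assert that keys in [start, end) are in strictly increasing
 * order and, for extent nodes, non-overlapping.
 */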
21 static void verify_no_dups(struct btree *b,
22 struct bkey_packed *start,
23 struct bkey_packed *end)
25 #ifdef CONFIG_BCACHEFS_DEBUG
26 struct bkey_packed *k;
28 for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
29 struct bkey l = bkey_unpack_key(b, k);
30 struct bkey r = bkey_unpack_key(b, bkey_next(k));
32 BUG_ON(btree_node_is_extents(b)
33 ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
34 : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
35 //BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
40 static void clear_needs_whiteout(struct bset *i)
42 struct bkey_packed *k;
44 for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
45 k->needs_whiteout = false;
48 static void set_needs_whiteout(struct bset *i)
50 struct bkey_packed *k;
52 for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
53 k->needs_whiteout = true;
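/*
 * Bounce buffers for sorting and compacting: allocation tries plain pages
 * first and falls back to the btree_bounce_pool mempool; *used_mempool
 * records which path was taken, so the matching free releases the buffer the
 * same way.
 */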
56 static void btree_bounce_free(struct bch_fs *c, unsigned order,
57 bool used_mempool, void *p)
60 mempool_free(p, &c->btree_bounce_pool);
62 vpfree(p, PAGE_SIZE << order);
65 static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
70 BUG_ON(order > btree_page_order(c));
72 *used_mempool = false;
73 p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
78 return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
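/*
 * A sort_iter merges up to MAX_BSETS + 1 sorted ranges of packed keys.
 * data[0] always points at the smallest remaining key according to the
 * comparison function; __sort_iter_sift() restores that invariant after
 * data[0] is advanced. Typical usage (sketch):
 *
 *	sort_iter_init(&iter, b);
 *	sort_iter_add(&iter, start, end);	// once per range
 *	sort_iter_sort(&iter, cmp);
 *	while ((k = sort_iter_next(&iter, cmp)))
 *		...
 */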
81 typedef int (*sort_cmp_fn)(struct btree *,
83 struct bkey_packed *);
89 struct sort_iter_set {
90 struct bkey_packed *k, *end;
91 } data[MAX_BSETS + 1];
94 static void sort_iter_init(struct sort_iter *iter, struct btree *b)
96 memset(iter, 0, sizeof(*iter));
100 static inline void __sort_iter_sift(struct sort_iter *iter,
107 i + 1 < iter->used &&
108 cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
110 swap(iter->data[i], iter->data[i + 1]);
113 static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
116 __sort_iter_sift(iter, 0, cmp);
119 static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
121 unsigned i = iter->used;
124 __sort_iter_sift(iter, i, cmp);
127 static void sort_iter_add(struct sort_iter *iter,
128 struct bkey_packed *k,
129 struct bkey_packed *end)
131 BUG_ON(iter->used >= ARRAY_SIZE(iter->data));
134 iter->data[iter->used++] = (struct sort_iter_set) { k, end };
137 static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
139 return iter->used ? iter->data->k : NULL;
142 static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
144 iter->data->k = bkey_next(iter->data->k);
146 BUG_ON(iter->data->k > iter->data->end);
148 if (iter->data->k == iter->data->end)
149 memmove(&iter->data[0],
151 sizeof(iter->data[0]) * --iter->used);
153 sort_iter_sift(iter, cmp);
156 static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
159 struct bkey_packed *ret = sort_iter_peek(iter);
162 sort_iter_advance(iter, cmp);
167 static inline int sort_key_whiteouts_cmp(struct btree *b,
168 struct bkey_packed *l,
169 struct bkey_packed *r)
171 return bkey_cmp_packed(b, l, r);
174 static unsigned sort_key_whiteouts(struct bkey_packed *dst,
175 struct sort_iter *iter)
177 struct bkey_packed *in, *out = dst;
179 sort_iter_sort(iter, sort_key_whiteouts_cmp);
181 while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
183 out = bkey_next(out);
186 return (u64 *) out - (u64 *) dst;
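/*
 * Extent whiteouts sort by start position; adjacent/overlapping whiteouts
 * are merged into larger ones, bounded by what the packed format's size and
 * offset fields can represent.
 */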
189 static inline int sort_extent_whiteouts_cmp(struct btree *b,
190 struct bkey_packed *l,
191 struct bkey_packed *r)
193 struct bkey ul = bkey_unpack_key(b, l);
194 struct bkey ur = bkey_unpack_key(b, r);
196 return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
199 static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
200 struct sort_iter *iter)
202 const struct bkey_format *f = &iter->b->format;
203 struct bkey_packed *in, *out = dst;
205 bool prev = false, l_packed = false;
206 u64 max_packed_size = bkey_field_max(f, BKEY_FIELD_SIZE);
207 u64 max_packed_offset = bkey_field_max(f, BKEY_FIELD_OFFSET);
210 max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);
212 sort_iter_sort(iter, sort_extent_whiteouts_cmp);
214 while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
215 EBUG_ON(bkeyp_val_u64s(f, in));
216 EBUG_ON(in->type != KEY_TYPE_DISCARD);
218 r.k = bkey_unpack_key(iter->b, in);
221 bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
222 if (bkey_cmp(l.k.p, r.k.p) >= 0)
226 ? min(max_packed_size, max_packed_offset -
227 bkey_start_offset(&l.k))
230 new_size = min(new_size, r.k.p.offset -
231 bkey_start_offset(&l.k));
233 BUG_ON(new_size < l.k.size);
235 bch2_key_resize(&l.k, new_size);
237 if (bkey_cmp(l.k.p, r.k.p) >= 0)
240 bch2_cut_front(l.k.p, &r);
244 if (!bch2_bkey_pack(out, &l, f)) {
248 out = bkey_next(out);
253 l_packed = bkey_packed(in);
257 if (!bch2_bkey_pack(out, &l, f)) {
261 out = bkey_next(out);
264 return (u64 *) out - (u64 *) dst;
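/*
 * Returns how many u64s compacting would reclaim from this bset (0 if
 * there's nothing to gain). COMPACT_LAZY only bothers when over a quarter of
 * the bset is dead space, or when we're already compacting and the bset is
 * still unwritten.
 */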
267 static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
269 enum compact_mode mode)
271 unsigned live_u64s = b->nr.bset_u64s[t - b->set];
272 unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
274 if (live_u64s == bset_u64s)
277 if (mode == COMPACT_LAZY) {
278 if (live_u64s * 4 < bset_u64s * 3 ||
279 (compacting && bset_unwritten(b, bset(b, t))))
280 return bset_u64s - live_u64s;
282 if (bset_written(b, bset(b, t)))
283 return bset_u64s - live_u64s;
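/*
 * Compact a btree node in memory: dead keys are dropped, and whiteouts that
 * still need to be written are collected, sorted (and merged, for extents)
 * and stashed in the unwritten whiteouts area at the end of the node.
 */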
289 bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
290 enum compact_mode mode)
292 const struct bkey_format *f = &b->format;
294 struct bkey_packed *whiteouts = NULL;
295 struct bkey_packed *u_start, *u_pos;
296 struct sort_iter sort_iter;
297 unsigned order, whiteout_u64s = 0, u64s;
298 bool used_mempool, compacting = false;
301 whiteout_u64s += should_compact_bset(b, t,
302 whiteout_u64s != 0, mode);
307 sort_iter_init(&sort_iter, b);
309 whiteout_u64s += b->whiteout_u64s;
310 order = get_order(whiteout_u64s * sizeof(u64));
312 whiteouts = btree_bounce_alloc(c, order, &used_mempool);
313 u_start = u_pos = whiteouts;
315 memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
317 u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);
319 sort_iter_add(&sort_iter, u_start, u_pos);
321 for_each_bset(b, t) {
322 struct bset *i = bset(b, t);
323 struct bkey_packed *k, *n, *out, *start, *end;
324 struct btree_node_entry *src = NULL, *dst = NULL;
326 if (t != b->set && bset_unwritten(b, i)) {
327 src = container_of(i, struct btree_node_entry, keys);
328 dst = max(write_block(b),
329 (void *) btree_bkey_last(b, t - 1));
332 if (!should_compact_bset(b, t, compacting, mode)) {
334 memmove(dst, src, sizeof(*src) +
335 le16_to_cpu(src->keys.u64s) *
338 set_btree_bset(b, t, i);
346 end = vstruct_last(i);
349 memmove(dst, src, sizeof(*src));
351 set_btree_bset(b, t, i);
356 for (k = start; k != end; k = n) {
359 if (bkey_deleted(k) && btree_node_is_extents(b))
362 if (bkey_whiteout(k) && !k->needs_whiteout)
365 if (bkey_whiteout(k)) {
366 unreserve_whiteout(b, t, k);
367 memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
368 set_bkeyp_val_u64s(f, u_pos, 0);
369 u_pos = bkey_next(u_pos);
370 } else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
372 out = bkey_next(out);
376 sort_iter_add(&sort_iter, u_start, u_pos);
378 if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
379 i->u64s = cpu_to_le16((u64 *) out - i->_data);
380 set_btree_bset_end(b, t);
381 bch2_bset_set_no_aux_tree(b, t);
385 b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;
387 BUG_ON((void *) unwritten_whiteouts_start(c, b) <
388 (void *) btree_bkey_last(b, bset_tree_last(b)));
390 u64s = btree_node_is_extents(b)
391 ? sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
393 : sort_key_whiteouts(unwritten_whiteouts_start(c, b),
396 BUG_ON(u64s > b->whiteout_u64s);
397 BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
398 BUG_ON(u_pos != whiteouts && !u64s);
400 if (u64s != b->whiteout_u64s) {
401 void *src = unwritten_whiteouts_start(c, b);
403 b->whiteout_u64s = u64s;
404 memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
408 unwritten_whiteouts_start(c, b),
409 unwritten_whiteouts_end(c, b));
411 btree_bounce_free(c, order, used_mempool, whiteouts);
413 if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
414 bch2_btree_build_aux_trees(b);
416 bch_btree_keys_u64s_remaining(c, b);
417 bch2_verify_btree_nr_keys(b);
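/* Drop all whiteouts from the node's bsets: */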
422 static bool bch2_drop_whiteouts(struct btree *b)
427 for_each_bset(b, t) {
428 struct bset *i = bset(b, t);
429 struct bkey_packed *k, *n, *out, *start, *end;
431 if (!should_compact_bset(b, t, true, true))
434 start = btree_bkey_first(b, t);
435 end = btree_bkey_last(b, t);
437 if (bset_unwritten(b, i) &&
440 max_t(struct bset *, write_block(b),
441 (void *) btree_bkey_last(b, t - 1));
443 memmove(dst, i, sizeof(struct bset));
445 set_btree_bset(b, t, i);
450 for (k = start; k != end; k = n) {
453 if (!bkey_whiteout(k)) {
455 out = bkey_next(out);
459 i->u64s = cpu_to_le16((u64 *) out - i->_data);
460 bch2_bset_set_no_aux_tree(b, t);
464 bch2_verify_btree_nr_keys(b);
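/*
 * Comparison for the main key sort: keys sort by position; at the same
 * position whiteouts sort first, so sort_keys() can drop a whiteout that's
 * immediately followed by an equal key, propagating needs_whiteout to the
 * key that's kept.
 */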
469 static inline int sort_keys_cmp(struct btree *b,
470 struct bkey_packed *l,
471 struct bkey_packed *r)
473 return bkey_cmp_packed(b, l, r) ?:
474 (int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
475 (int) l->needs_whiteout - (int) r->needs_whiteout;
478 static unsigned sort_keys(struct bkey_packed *dst,
479 struct sort_iter *iter,
480 bool filter_whiteouts)
482 const struct bkey_format *f = &iter->b->format;
483 struct bkey_packed *in, *next, *out = dst;
485 sort_iter_sort(iter, sort_keys_cmp);
487 while ((in = sort_iter_next(iter, sort_keys_cmp))) {
488 if (bkey_whiteout(in) &&
489 (filter_whiteouts || !in->needs_whiteout))
492 if (bkey_whiteout(in) &&
493 (next = sort_iter_peek(iter)) &&
494 !bkey_cmp_packed(iter->b, in, next)) {
495 BUG_ON(in->needs_whiteout &&
496 next->needs_whiteout);
498 * XXX racy, called with read lock from write path
500 * leads to spurious BUG_ON() in bkey_unpack_key() in
503 next->needs_whiteout |= in->needs_whiteout;
507 if (bkey_whiteout(in)) {
508 memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
509 set_bkeyp_val_u64s(f, out, 0);
513 out = bkey_next(out);
516 return (u64 *) out - (u64 *) dst;
519 static inline int sort_extents_cmp(struct btree *b,
520 struct bkey_packed *l,
521 struct bkey_packed *r)
523 return bkey_cmp_packed(b, l, r) ?:
524 (int) bkey_deleted(l) - (int) bkey_deleted(r);
527 static unsigned sort_extents(struct bkey_packed *dst,
528 struct sort_iter *iter,
529 bool filter_whiteouts)
531 struct bkey_packed *in, *out = dst;
533 sort_iter_sort(iter, sort_extents_cmp);
535 while ((in = sort_iter_next(iter, sort_extents_cmp))) {
536 if (bkey_deleted(in))
539 if (bkey_whiteout(in) &&
540 (filter_whiteouts || !in->needs_whiteout))
544 out = bkey_next(out);
547 return (u64 *) out - (u64 *) dst;
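/*
 * Merge sort bsets [start_idx, end_idx) into a bounce buffer: when sorting
 * the entire node the buffers are swapped, otherwise the result is copied
 * back over the first bset in the range and the later bset_tree entries are
 * shifted down.
 */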
550 static void btree_node_sort(struct bch_fs *c, struct btree *b,
551 struct btree_iter *iter,
554 bool filter_whiteouts)
556 struct btree_node *out;
557 struct sort_iter sort_iter;
559 struct bset *start_bset = bset(b, &b->set[start_idx]);
560 bool used_mempool = false;
562 unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
563 bool sorting_entire_node = start_idx == 0 &&
566 sort_iter_init(&sort_iter, b);
568 for (t = b->set + start_idx;
569 t < b->set + end_idx;
571 u64s += le16_to_cpu(bset(b, t)->u64s);
572 sort_iter_add(&sort_iter,
573 btree_bkey_first(b, t),
574 btree_bkey_last(b, t));
577 order = sorting_entire_node
578 ? btree_page_order(c)
579 : get_order(__vstruct_bytes(struct btree_node, u64s));
581 out = btree_bounce_alloc(c, order, &used_mempool);
583 start_time = local_clock();
585 if (btree_node_is_extents(b))
586 filter_whiteouts = bset_written(b, start_bset);
588 u64s = btree_node_is_extents(b)
589 ? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
590 : sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
592 out->keys.u64s = cpu_to_le16(u64s);
594 BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
596 if (sorting_entire_node)
597 bch2_time_stats_update(&c->btree_sort_time, start_time);
599 /* Make sure we preserve bset journal_seq: */
600 for (t = b->set + start_idx + 1;
601 t < b->set + end_idx;
603 start_bset->journal_seq =
604 max(start_bset->journal_seq,
605 bset(b, t)->journal_seq);
607 if (sorting_entire_node) {
608 unsigned u64s = le16_to_cpu(out->keys.u64s);
610 BUG_ON(order != btree_page_order(c));
613 * Our temporary buffer is the same size as the btree node's
614 * buffer, we can just swap buffers instead of doing a big
618 out->keys.u64s = cpu_to_le16(u64s);
620 set_btree_bset(b, b->set, &b->data->keys);
622 start_bset->u64s = out->keys.u64s;
623 memcpy_u64s(start_bset->start,
625 le16_to_cpu(out->keys.u64s));
628 for (i = start_idx + 1; i < end_idx; i++)
629 b->nr.bset_u64s[start_idx] +=
634 for (i = start_idx + 1; i < b->nsets; i++) {
635 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
636 b->set[i] = b->set[i + shift];
639 for (i = b->nsets; i < MAX_BSETS; i++)
640 b->nr.bset_u64s[i] = 0;
642 set_btree_bset_end(b, &b->set[start_idx]);
643 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
645 btree_bounce_free(c, order, used_mempool, out);
647 bch2_verify_btree_nr_keys(b);
650 /* Sort + repack in a new format: */
651 static struct btree_nr_keys sort_repack(struct bset *dst,
653 struct btree_node_iter *src_iter,
654 struct bkey_format *out_f,
655 bool filter_whiteouts)
657 struct bkey_format *in_f = &src->format;
658 struct bkey_packed *in, *out = vstruct_last(dst);
659 struct btree_nr_keys nr;
661 memset(&nr, 0, sizeof(nr));
663 while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
664 if (filter_whiteouts && bkey_whiteout(in))
667 if (bch2_bkey_transform(out_f, out, bkey_packed(in)
668 ? in_f : &bch2_bkey_format_current, in))
669 out->format = KEY_FORMAT_LOCAL_BTREE;
671 bch2_bkey_unpack(src, (void *) out, in);
673 btree_keys_account_key_add(&nr, 0, out);
674 out = bkey_next(out);
677 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
681 /* Sort, repack, and merge: */
682 static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
685 struct btree_node_iter *iter,
686 struct bkey_format *out_f,
687 bool filter_whiteouts,
688 key_filter_fn filter,
691 struct bkey_packed *k, *prev = NULL, *out;
692 struct btree_nr_keys nr;
695 memset(&nr, 0, sizeof(nr));
697 while ((k = bch2_btree_node_iter_next_all(iter, src))) {
698 if (filter_whiteouts && bkey_whiteout(k))
702 * The filter might modify pointers, so we have to unpack the
703 * key and values to &tmp.k:
705 bch2_bkey_unpack(src, &tmp.k, k);
707 if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
710 /* prev is always unpacked, for key merging: */
714 merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
718 * the current key becomes the new prev: advance prev, then
719 * copy the current key - but first pack prev (in place):
722 bch2_bkey_pack(prev, (void *) prev, out_f);
724 btree_keys_account_key_add(&nr, 0, prev);
725 prev = bkey_next(prev);
727 prev = vstruct_last(dst);
730 bkey_copy(prev, &tmp.k);
734 bch2_bkey_pack(prev, (void *) prev, out_f);
735 btree_keys_account_key_add(&nr, 0, prev);
736 out = bkey_next(prev);
738 out = vstruct_last(dst);
741 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
745 void bch2_btree_sort_into(struct bch_fs *c,
749 struct btree_nr_keys nr;
750 struct btree_node_iter src_iter;
751 u64 start_time = local_clock();
753 BUG_ON(dst->nsets != 1);
755 bch2_bset_set_no_aux_tree(dst, dst->set);
757 bch2_btree_node_iter_init_from_start(&src_iter, src,
758 btree_node_is_extents(src));
760 if (btree_node_ops(src)->key_normalize ||
761 btree_node_ops(src)->key_merge)
762 nr = sort_repack_merge(c, btree_bset_first(dst),
766 btree_node_ops(src)->key_normalize,
767 btree_node_ops(src)->key_merge);
769 nr = sort_repack(btree_bset_first(dst),
774 bch2_time_stats_update(&c->btree_sort_time, start_time);
776 set_btree_bset_end(dst, dst->set);
778 dst->nr.live_u64s += nr.live_u64s;
779 dst->nr.bset_u64s[0] += nr.bset_u64s[0];
780 dst->nr.packed_keys += nr.packed_keys;
781 dst->nr.unpacked_keys += nr.unpacked_keys;
783 bch2_verify_btree_nr_keys(dst);
786 #define SORT_CRIT (4096 / sizeof(u64))
789 * We're about to add another bset to the btree node, so if there are currently
790 * too many bsets, sort some of them together:
792 static bool btree_node_compact(struct bch_fs *c, struct btree *b,
793 struct btree_iter *iter)
795 unsigned unwritten_idx;
798 for (unwritten_idx = 0;
799 unwritten_idx < b->nsets;
801 if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
804 if (b->nsets - unwritten_idx > 1) {
805 btree_node_sort(c, b, iter, unwritten_idx,
810 if (unwritten_idx > 1) {
811 btree_node_sort(c, b, iter, 0, unwritten_idx, false);
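/*
 * Rebuild the auxiliary search trees for each bset; only the last bset, if
 * still unwritten, is built as writeable.
 */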
818 void bch2_btree_build_aux_trees(struct btree *b)
823 bch2_bset_build_aux_tree(b, t,
824 bset_unwritten(b, bset(b, t)) &&
825 t == bset_tree_last(b));
829 * @bch2_btree_init_next - initialize a new (unwritten) bset that can then be
832 * Safe to call if there already is an unwritten bset - will only add a new bset
833 * if @b doesn't already have one.
835 * If we sorted (i.e. invalidated iterators), the iterator passed in is reinitialized.
837 void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
838 struct btree_iter *iter)
840 struct btree_node_entry *bne;
843 EBUG_ON(!(b->lock.state.seq & 1));
844 EBUG_ON(iter && iter->nodes[b->level] != b);
846 did_sort = btree_node_compact(c, b, iter);
848 bne = want_new_bset(c, b);
850 bch2_bset_init_next(b, &bne->keys);
852 bch2_btree_build_aux_trees(b);
854 if (iter && did_sort)
855 bch2_btree_iter_reinit_node(iter, b);
858 static struct nonce btree_nonce(struct btree *b,
862 return (struct nonce) {{
863 [0] = cpu_to_le32(offset),
864 [1] = ((__le32 *) &i->seq)[0],
865 [2] = ((__le32 *) &i->seq)[1],
866 [3] = ((__le32 *) &i->journal_seq)[0] ^ BCH_NONCE_BTREE,
870 static void bset_encrypt(struct bch_fs *c, struct bset *i, struct nonce nonce)
872 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
873 vstruct_end(i) - (void *) i->_data);
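/*
 * Error reporting shared by the read and write validation paths: on read,
 * before initial GC has finished, errors are emitted as fixable fsck errors;
 * otherwise they're logged and ret is set to BCH_FSCK_ERRORS_NOT_FIXED.
 */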
876 #define btree_node_error(c, b, msg, ...) \
878 if (write == READ && \
879 !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
880 mustfix_fsck_err(c, \
881 "btree node read error at btree %u level %u/%u\n"\
882 "pos %llu:%llu node offset %u bset u64s %u: " msg,\
883 (b)->btree_id, (b)->level, \
884 (c)->btree_roots[(b)->btree_id].level, \
885 (b)->key.k.p.inode, (b)->key.k.p.offset, \
886 (b)->written, le16_to_cpu((i)->u64s), \
889 bch_err(c, "%s at btree %u level %u/%u\n" \
890 "pos %llu:%llu node offset %u bset u64s %u: " msg,\
892 ? "corrupt metadata in btree node write" \
893 : "btree node error", \
894 (b)->btree_id, (b)->level, \
895 (c)->btree_roots[(b)->btree_id].level, \
896 (b)->key.k.p.inode, (b)->key.k.p.offset, \
897 (b)->written, le16_to_cpu((i)->u64s), \
899 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
904 static int validate_bset(struct bch_fs *c, struct btree *b,
905 struct bset *i, unsigned sectors,
906 unsigned *whiteout_u64s, int write)
908 struct bkey_packed *k, *prev = NULL;
909 struct bpos prev_pos = POS_MIN;
910 bool seen_non_whiteout = false;
913 if (le16_to_cpu(i->version) != BCACHE_BSET_VERSION) {
914 btree_node_error(c, b, "unsupported bset version");
919 if (b->written + sectors > c->sb.btree_node_size) {
920 btree_node_error(c, b, "bset past end of btree node");
925 if (b->written && !i->u64s)
926 btree_node_error(c, b, "empty set");
928 if (!BSET_SEPARATE_WHITEOUTS(i)) {
929 seen_non_whiteout = true;
934 k != vstruct_last(i);) {
940 btree_node_error(c, b,
941 "KEY_U64s 0: %zu bytes of metadata lost",
942 vstruct_end(i) - (void *) k);
944 i->u64s = cpu_to_le16((u64 *) k - i->_data);
948 if (bkey_next(k) > vstruct_last(i)) {
949 btree_node_error(c, b,
950 "key extends past end of bset");
952 i->u64s = cpu_to_le16((u64 *) k - i->_data);
956 if (k->format > KEY_FORMAT_CURRENT) {
957 btree_node_error(c, b,
958 "invalid bkey format %u", k->format);
960 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
961 memmove_u64s_down(k, bkey_next(k),
962 (u64 *) vstruct_end(i) - (u64 *) k);
966 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
967 bch2_bkey_swab(btree_node_type(b), &b->format, k);
969 u = bkey_disassemble(b, k, &tmp);
971 invalid = bch2_btree_bkey_invalid(c, b, u);
975 bch2_bkey_val_to_text(c, btree_node_type(b),
976 buf, sizeof(buf), u);
977 btree_node_error(c, b,
978 "invalid bkey %s: %s", buf, invalid);
980 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
981 memmove_u64s_down(k, bkey_next(k),
982 (u64 *) vstruct_end(i) - (u64 *) k);
987 * with the separate whiteouts thing (used for extents), the
988 * second set of keys actually can have whiteouts too, so we
989 * can't solely go off bkey_whiteout()...
992 if (!seen_non_whiteout &&
993 (!bkey_whiteout(k) ||
994 (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
995 *whiteout_u64s = k->_data - i->_data;
996 seen_non_whiteout = true;
997 } else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
998 btree_node_error(c, b,
999 "keys out of order: %llu:%llu > %llu:%llu",
1003 bkey_start_offset(u.k));
1004 /* XXX: repair this */
1012 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
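/*
 * Read completion: walk the bsets in the node buffer, verifying the checksum
 * and decrypting each one, validate the keys, and push each bset onto a
 * btree_node_iter; then merge sort everything (resolving overlapping
 * extents) into a bounce buffer that gets swapped in as the node's buffer.
 */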
1017 int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b)
1019 struct btree_node_entry *bne;
1020 struct bset *i = &b->data->keys;
1021 struct btree_node_iter *iter;
1022 struct btree_node *sorted;
1026 struct bch_csum csum;
1028 int ret, should_retry = 0, write = READ;
1030 iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
1031 __bch2_btree_node_iter_init(iter, btree_node_is_extents(b));
1033 err = "dynamic fault";
1034 if (bch2_meta_read_fault("btree"))
1037 while (b->written < c->sb.btree_node_size) {
1038 unsigned sectors, whiteout_u64s = 0;
1044 if (le64_to_cpu(b->data->magic) != bset_magic(c))
1047 err = "bad btree header";
1048 if (!b->data->keys.seq)
1051 err = "unknown checksum type";
1052 if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
1055 nonce = btree_nonce(b, i, b->written << 9);
1056 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
1058 err = "bad checksum";
1059 if (bch2_crc_cmp(csum, b->data->csum))
1062 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
1064 (void *) &b->data->keys -
1065 (void *) &b->data->flags);
1066 nonce = nonce_add(nonce,
1067 round_up((void *) &b->data->keys -
1068 (void *) &b->data->flags,
1069 CHACHA20_BLOCK_SIZE));
1070 bset_encrypt(c, i, nonce);
1072 sectors = vstruct_sectors(b->data, c->block_bits);
1074 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
1075 u64 *p = (u64 *) &b->data->ptr;
1078 bch2_bpos_swab(&b->data->min_key);
1079 bch2_bpos_swab(&b->data->max_key);
1082 err = "incorrect btree id";
1083 if (BTREE_NODE_ID(b->data) != b->btree_id)
1086 err = "incorrect level";
1087 if (BTREE_NODE_LEVEL(b->data) != b->level)
1090 err = "incorrect max key";
1091 if (bkey_cmp(b->data->max_key, b->key.k.p))
1095 * not correct anymore, due to btree node write error
1098 * need to add b->data->seq to btree keys and verify
1101 err = "incorrect backpointer";
1102 if (!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
1106 err = bch2_bkey_format_validate(&b->data->format);
1110 set_btree_bset(b, b->set, &b->data->keys);
1112 btree_node_set_format(b, b->data->format);
1114 bne = write_block(b);
1117 if (i->seq != b->data->keys.seq)
1120 err = "unknown checksum type";
1121 if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
1124 nonce = btree_nonce(b, i, b->written << 9);
1125 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1127 err = "bad checksum";
1128 if (bch2_crc_cmp(csum, bne->csum))
1131 bset_encrypt(c, i, nonce);
1133 sectors = vstruct_sectors(bne, c->block_bits);
1136 ret = validate_bset(c, b, i, sectors, &whiteout_u64s, READ);
1140 b->written += sectors;
1142 err = "insufficient memory";
1143 ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
1150 __bch2_btree_node_iter_push(iter, b,
1152 vstruct_idx(i, whiteout_u64s));
1154 __bch2_btree_node_iter_push(iter, b,
1155 vstruct_idx(i, whiteout_u64s),
1159 err = "corrupted btree";
1160 for (bne = write_block(b);
1161 bset_byte_offset(b, bne) < btree_bytes(c);
1162 bne = (void *) bne + block_bytes(c))
1163 if (bne->keys.seq == b->data->keys.seq)
1166 sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
1167 sorted->keys.u64s = 0;
1169 b->nr = btree_node_is_extents(b)
1170 ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
1171 : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);
1173 u64s = le16_to_cpu(sorted->keys.u64s);
1175 sorted->keys.u64s = cpu_to_le16(u64s);
1176 swap(sorted, b->data);
1177 set_btree_bset(b, b->set, &b->data->keys);
1180 BUG_ON(b->nr.live_u64s != u64s);
1182 btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);
1184 bch2_bset_build_aux_tree(b, b->set, false);
1186 set_needs_whiteout(btree_bset_first(b));
1188 btree_node_reset_sib_u64s(b);
1190 mempool_free(iter, &c->fill_iter);
1191 return should_retry;
1193 btree_node_error(c, b, "%s", err);
1195 bch2_inconsistent_error(c);
1196 set_btree_node_read_error(b);
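/*
 * Read completion work: if the read failed or didn't validate, retry from
 * the remaining replicas one at a time, avoiding devices already tried.
 */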
1203 static void btree_node_read_work(struct work_struct *work)
1205 struct btree_read_bio *rb =
1206 container_of(work, struct btree_read_bio, work);
1207 struct bch_fs *c = rb->c;
1208 struct bch_dev *ca = rb->pick.ca;
1209 struct btree *b = rb->bio.bi_private;
1210 struct bio *bio = &rb->bio;
1211 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
1212 const struct bch_extent_ptr *ptr;
1213 struct bch_devs_mask avoid;
1215 bch2_dev_io_err_on(bio->bi_error, rb->pick.ca, "btree read");
1216 percpu_ref_put(&rb->pick.ca->io_ref);
1218 if (!bio->bi_error &&
1219 !bch2_btree_node_read_done(c, b))
1224 bch2_time_stats_update(&c->btree_read_time, rb->start_time);
1226 clear_btree_node_read_in_flight(b);
1227 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1230 memset(&avoid, 0, sizeof(avoid));
1231 __set_bit(ca->dev_idx, avoid.d);
1233 extent_for_each_ptr(e, ptr) {
1234 memset(&rb->pick, 0, sizeof(rb->pick));
1235 bch2_get_read_device(c, e.k, ptr, NULL, &avoid, &rb->pick);
1241 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
1242 bio->bi_bdev = rb->pick.ca->disk_sb.bdev;
1243 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
1244 bio->bi_iter.bi_size = btree_bytes(c);
1245 submit_bio_wait(bio);
1247 bch2_dev_io_err_on(bio->bi_error, rb->pick.ca, "btree read");
1248 percpu_ref_put(&rb->pick.ca->io_ref);
1250 if (!bio->bi_error &&
1251 !bch2_btree_node_read_done(c, b))
1255 set_btree_node_read_error(b);
1259 static void btree_node_read_endio(struct bio *bio)
1261 struct btree_read_bio *rb =
1262 container_of(bio, struct btree_read_bio, bio);
1264 INIT_WORK(&rb->work, btree_node_read_work);
1265 schedule_work(&rb->work);
1268 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1271 struct extent_pick_ptr pick;
1272 struct btree_read_bio *rb;
1275 trace_btree_read(c, b);
1277 pick = bch2_btree_pick_ptr(c, b);
1278 if (bch2_fs_fatal_err_on(!pick.ca, c,
1279 "btree node read error: no device to read from")) {
1280 set_btree_node_read_error(b);
1284 bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_read_bio);
1285 rb = container_of(bio, struct btree_read_bio, bio);
1287 rb->start_time = local_clock();
1289 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
1290 bio->bi_bdev = pick.ca->disk_sb.bdev;
1291 bio->bi_iter.bi_sector = pick.ptr.offset;
1292 bio->bi_iter.bi_size = btree_bytes(c);
1293 bch2_bio_map(bio, b->data);
1295 this_cpu_add(pick.ca->io_done->sectors[READ][BCH_DATA_BTREE],
1298 set_btree_node_read_in_flight(b);
1301 submit_bio_wait(bio);
1302 bio->bi_private = b;
1303 btree_node_read_work(&rb->work);
1305 bio->bi_end_io = btree_node_read_endio;
1306 bio->bi_private = b;
1311 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1312 const struct bkey_i *k, unsigned level)
1318 closure_init_stack(&cl);
1321 ret = bch2_btree_node_cannibalize_lock(c, &cl);
1325 b = bch2_btree_node_mem_alloc(c);
1326 bch2_btree_node_cannibalize_unlock(c);
1330 bkey_copy(&b->key, k);
1331 BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
1333 bch2_btree_node_read(c, b, true);
1334 six_unlock_write(&b->lock);
1336 if (btree_node_read_error(b)) {
1337 six_unlock_intent(&b->lock);
1341 bch2_btree_set_root_for_read(c, b);
1342 six_unlock_intent(&b->lock);
1347 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1348 struct btree_write *w)
1350 bch2_journal_pin_drop(&c->journal, &w->journal);
1351 closure_wake_up(&w->wait);
1354 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1356 struct btree_write *w = btree_prev_write(b);
1358 bch2_btree_complete_write(c, b, w);
1359 btree_node_io_unlock(b);
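/*
 * Write error handling: drop the pointers to the replicas that failed from
 * the node's key and update it; if no pointers remain or the key update
 * fails, mark the node noevict and declare a fatal error.
 */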
1362 static void bch2_btree_node_write_error(struct bch_fs *c,
1363 struct bch_write_bio *wbio)
1365 struct btree *b = wbio->bio.bi_private;
1366 struct closure *cl = wbio->cl;
1367 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1368 struct bkey_i_extent *new_key;
1370 six_lock_read(&b->lock);
1371 bkey_copy(&tmp.k, &b->key);
1372 six_unlock_read(&b->lock);
1374 if (!bkey_extent_is_data(&tmp.k.k) || !PTR_HASH(&tmp.k)) {
1375 /* Node has been freed: */
1379 new_key = bkey_i_to_extent(&tmp.k);
1381 while (wbio->replicas_failed) {
1382 unsigned idx = __fls(wbio->replicas_failed);
1384 bch2_extent_drop_ptr_idx(extent_i_to_s(new_key), idx);
1385 wbio->replicas_failed ^= 1 << idx;
1388 if (!bch2_extent_nr_ptrs(extent_i_to_s_c(new_key)) ||
1389 bch2_btree_node_update_key(c, b, new_key)) {
1390 set_btree_node_noevict(b);
1391 bch2_fatal_error(c);
1394 bio_put(&wbio->bio);
1395 btree_node_write_done(c, b);
1400 void bch2_btree_write_error_work(struct work_struct *work)
1402 struct bch_fs *c = container_of(work, struct bch_fs,
1403 btree_write_error_work);
1407 spin_lock_irq(&c->btree_write_error_lock);
1408 bio = bio_list_pop(&c->btree_write_error_list);
1409 spin_unlock_irq(&c->btree_write_error_lock);
1414 bch2_btree_node_write_error(c, to_wbio(bio));
1418 static void btree_node_write_endio(struct bio *bio)
1420 struct btree *b = bio->bi_private;
1421 struct bch_write_bio *wbio = to_wbio(bio);
1422 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
1423 struct bch_write_bio *orig = parent ?: wbio;
1424 struct closure *cl = !wbio->split ? wbio->cl : NULL;
1425 struct bch_fs *c = wbio->c;
1426 struct bch_dev *ca = wbio->ca;
1428 if (bch2_dev_io_err_on(bio->bi_error, ca, "btree write") ||
1429 bch2_meta_write_fault("btree"))
1430 set_bit(wbio->ptr_idx, (unsigned long *) &orig->replicas_failed);
1432 if (wbio->have_io_ref)
1433 percpu_ref_put(&ca->io_ref);
1437 bio_endio(&parent->bio);
1441 btree_bounce_free(c,
1446 if (wbio->replicas_failed) {
1447 unsigned long flags;
1449 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1450 bio_list_add(&c->btree_write_error_list, &wbio->bio);
1451 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1452 queue_work(c->wq, &c->btree_write_error_work);
1457 btree_node_write_done(c, b);
1462 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1463 struct bset *i, unsigned sectors)
1465 const struct bch_extent_ptr *ptr;
1466 unsigned whiteout_u64s = 0;
1469 extent_for_each_ptr(bkey_i_to_s_c_extent(&b->key), ptr)
1472 ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE);
1474 bch2_inconsistent_error(c);
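/*
 * Main write path: atomically claim the write via cmpxchg on b->flags,
 * compact whiteouts, merge sort all unwritten bsets (plus pending whiteouts)
 * into a bounce buffer, checksum/encrypt it, and submit it to each replica
 * with bch2_submit_wbio_replicas().
 */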
1479 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1480 struct closure *parent,
1481 enum six_lock_type lock_type_held)
1483 struct bch_write_bio *wbio;
1484 struct bset_tree *t;
1486 struct btree_node *bn = NULL;
1487 struct btree_node_entry *bne = NULL;
1489 struct bkey_s_extent e;
1490 struct bch_extent_ptr *ptr;
1491 struct sort_iter sort_iter;
1493 unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
1496 unsigned long old, new;
1500 * We may only have a read lock on the btree node - the dirty bit is our
1501 * "lock" against racing with other threads that may be trying to start
1502 * a write, we do a write iff we clear the dirty bit. Since setting the
1503 * dirty bit requires a write lock, we can't race with other threads
1507 old = new = READ_ONCE(b->flags);
1509 if (!(old & (1 << BTREE_NODE_dirty)))
1513 !btree_node_may_write(b))
1516 if (old & (1 << BTREE_NODE_write_in_flight)) {
1517 btree_node_wait_on_io(b);
1521 new &= ~(1 << BTREE_NODE_dirty);
1522 new &= ~(1 << BTREE_NODE_need_write);
1523 new |= (1 << BTREE_NODE_write_in_flight);
1524 new |= (1 << BTREE_NODE_just_written);
1525 new ^= (1 << BTREE_NODE_write_idx);
1526 } while (cmpxchg_acquire(&b->flags, old, new) != old);
1528 BUG_ON(!list_empty(&b->write_blocked));
1529 BUG_ON((b->will_make_reachable != NULL) != !b->written);
1531 BUG_ON(b->written >= c->sb.btree_node_size);
1532 BUG_ON(bset_written(b, btree_bset_last(b)));
1533 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1534 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1536 if (lock_type_held == SIX_LOCK_intent) {
1537 six_lock_write(&b->lock);
1538 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
1539 six_unlock_write(&b->lock);
1541 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
1544 BUG_ON(b->uncompacted_whiteout_u64s);
1546 sort_iter_init(&sort_iter, b);
1549 ? sizeof(struct btree_node)
1550 : sizeof(struct btree_node_entry);
1552 bytes += b->whiteout_u64s * sizeof(u64);
1554 for_each_bset(b, t) {
1557 if (bset_written(b, i))
1560 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1561 sort_iter_add(&sort_iter,
1562 btree_bkey_first(b, t),
1563 btree_bkey_last(b, t));
1564 seq = max(seq, le64_to_cpu(i->journal_seq));
1567 order = get_order(bytes);
1568 data = btree_bounce_alloc(c, order, &used_mempool);
1576 bne->keys = b->data->keys;
1580 i->journal_seq = cpu_to_le64(seq);
1583 if (!btree_node_is_extents(b)) {
1584 sort_iter_add(&sort_iter,
1585 unwritten_whiteouts_start(c, b),
1586 unwritten_whiteouts_end(c, b));
1587 SET_BSET_SEPARATE_WHITEOUTS(i, false);
1589 memcpy_u64s(i->start,
1590 unwritten_whiteouts_start(c, b),
1592 i->u64s = cpu_to_le16(b->whiteout_u64s);
1593 SET_BSET_SEPARATE_WHITEOUTS(i, true);
1596 b->whiteout_u64s = 0;
1598 u64s = btree_node_is_extents(b)
1599 ? sort_extents(vstruct_last(i), &sort_iter, false)
1600 : sort_keys(i->start, &sort_iter, false);
1601 le16_add_cpu(&i->u64s, u64s);
1603 clear_needs_whiteout(i);
1605 /* do we have data to write? */
1606 if (b->written && !i->u64s)
1609 bytes_to_write = vstruct_end(i) - data;
1610 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1612 memset(data + bytes_to_write, 0,
1613 (sectors_to_write << 9) - bytes_to_write);
1615 BUG_ON(b->written + sectors_to_write > c->sb.btree_node_size);
1616 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1617 BUG_ON(i->seq != b->data->keys.seq);
1619 i->version = cpu_to_le16(BCACHE_BSET_VERSION);
1620 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1622 nonce = btree_nonce(b, i, b->written << 9);
1624 /* if we're going to be encrypting, check metadata validity first: */
1625 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
1626 validate_bset_for_write(c, b, i, sectors_to_write))
1630 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
1632 (void *) &b->data->keys -
1633 (void *) &b->data->flags);
1634 nonce = nonce_add(nonce,
1635 round_up((void *) &b->data->keys -
1636 (void *) &b->data->flags,
1637 CHACHA20_BLOCK_SIZE));
1638 bset_encrypt(c, i, nonce);
1640 nonce = btree_nonce(b, i, b->written << 9);
1641 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
1643 bset_encrypt(c, i, nonce);
1645 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1648 /* if we're not encrypting, check metadata after checksumming: */
1649 if (!bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
1650 validate_bset_for_write(c, b, i, sectors_to_write))
1654 * We handle btree write errors by immediately halting the journal -
1655 * after we've done that, we can't issue any subsequent btree writes
1656 * because they might have pointers to new nodes that failed to write.
1658 * Furthermore, there's no point in doing any more btree writes because
1659 * with the journal stopped, we're never going to update the journal to
1660 * reflect that those writes were done and the data flushed from the
1663 * Make sure to update b->written so bch2_btree_init_next() doesn't
1666 if (bch2_journal_error(&c->journal) ||
1670 trace_btree_write(b, bytes_to_write, sectors_to_write);
1672 wbio = wbio_init(bio_alloc_bioset(GFP_NOIO, 1 << order, &c->bio_write));
1674 wbio->order = order;
1675 wbio->used_mempool = used_mempool;
1677 wbio->bio.bi_opf = REQ_OP_WRITE|REQ_META|REQ_FUA;
1678 wbio->bio.bi_iter.bi_size = sectors_to_write << 9;
1679 wbio->bio.bi_end_io = btree_node_write_endio;
1680 wbio->bio.bi_private = b;
1683 closure_get(parent);
1685 bch2_bio_map(&wbio->bio, data);
1688 * If we're appending to a leaf node, we don't technically need FUA -
1689 * this write just needs to be persisted before the next journal write,
1690 * which will be marked FLUSH|FUA.
1692 * Similarly if we're writing a new btree root - the pointer is going to
1693 * be in the next journal entry.
1695 * But if we're writing a new btree node (that isn't a root) or
1696 * appending to a non leaf btree node, we need either FUA or a flush
1697 * when we write the parent with the new pointer. FUA is cheaper than a
1698 * flush, and writes appending to leaf nodes aren't blocking anything so
1699 * just make all btree node writes FUA to keep things sane.
1702 bkey_copy(&k.key, &b->key);
1703 e = bkey_i_to_s_extent(&k.key);
1705 extent_for_each_ptr(e, ptr)
1706 ptr->offset += b->written;
1708 b->written += sectors_to_write;
1710 bch2_submit_wbio_replicas(wbio, c, BCH_DATA_BTREE, &k.key);
1713 set_btree_node_noevict(b);
1714 b->written += sectors_to_write;
1716 btree_bounce_free(c, order, used_mempool, data);
1717 btree_node_write_done(c, b);
1721 * Work that must be done with write lock held:
1723 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
1725 bool invalidated_iter = false;
1726 struct btree_node_entry *bne;
1727 struct bset_tree *t;
1729 if (!btree_node_just_written(b))
1732 BUG_ON(b->whiteout_u64s);
1733 BUG_ON(b->uncompacted_whiteout_u64s);
1735 clear_btree_node_just_written(b);
1738 * Note: immediately after write, bset_unwritten()/bset_written() don't
1739 * work - the amount of data we had to write after compaction might have
1740 * been smaller than the offset of the last bset.
1742 * However, we know that all bsets have been written here, as long as
1743 * we're still holding the write lock:
1747 * XXX: decide if we really want to unconditionally sort down to a
1751 btree_node_sort(c, b, NULL, 0, b->nsets, true);
1752 invalidated_iter = true;
1754 invalidated_iter = bch2_drop_whiteouts(b);
1758 set_needs_whiteout(bset(b, t));
1760 bch2_btree_verify(c, b);
1763 * If later we don't unconditionally sort down to a single bset, we have
1764 * to ensure this is still true:
1766 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
1768 bne = want_new_bset(c, b);
1770 bch2_bset_init_next(b, &bne->keys);
1772 bch2_btree_build_aux_trees(b);
1774 return invalidated_iter;
1778 * Use this one if the node is intent locked:
1780 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1781 struct closure *parent,
1782 enum six_lock_type lock_type_held)
1784 BUG_ON(lock_type_held == SIX_LOCK_write);
1786 if (lock_type_held == SIX_LOCK_intent ||
1787 six_trylock_convert(&b->lock, SIX_LOCK_read,
1789 __bch2_btree_node_write(c, b, parent, SIX_LOCK_intent);
1791 /* don't cycle lock unnecessarily: */
1792 if (btree_node_just_written(b)) {
1793 six_lock_write(&b->lock);
1794 bch2_btree_post_write_cleanup(c, b);
1795 six_unlock_write(&b->lock);
1798 if (lock_type_held == SIX_LOCK_read)
1799 six_lock_downgrade(&b->lock);
1801 __bch2_btree_node_write(c, b, parent, SIX_LOCK_read);
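/*
 * Debug assertion that every cached btree node has been written out: walks
 * the btree node hash table under RCU and BUG()s on any dirty node.
 */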
1805 void bch2_btree_verify_flushed(struct bch_fs *c)
1807 struct bucket_table *tbl;
1808 struct rhash_head *pos;
1813 tbl = rht_dereference_rcu(c->btree_cache_table.tbl,
1814 &c->btree_cache_table);
1816 for (i = 0; i < tbl->size; i++)
1817 rht_for_each_entry_rcu(b, pos, tbl, i, hash)
1818 BUG_ON(btree_node_dirty(b));