#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"

#include <trace/events/bcachefs.h>

/* btree_node_iter_large: */

#define btree_node_iter_cmp_heap(h, _l, _r)				\
	__btree_node_iter_cmp(b,					\
			__btree_node_offset_to_key(b, (_l).k),		\
			__btree_node_offset_to_key(b, (_r).k))

void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
				     struct btree *b,
				     const struct bkey_packed *k,
				     const struct bkey_packed *end)
{
	struct btree_node_iter_set n =
		((struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		});

	__heap_add(iter, n, btree_node_iter_cmp_heap);
}

void bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
					struct btree *b)
{
	iter->data->k += __btree_node_offset_to_key(b, iter->data->k)->u64s;

	EBUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		heap_del(iter, 0, btree_node_iter_cmp_heap);
	else
		heap_sift_down(iter, 0, btree_node_iter_cmp_heap);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k;

	for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
		struct bkey l = bkey_unpack_key(b, k);
		struct bkey r = bkey_unpack_key(b, bkey_next(k));

		BUG_ON(btree_node_is_extents(b)
		       ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
		       : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
		//BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
	}
#endif
}

static void clear_needs_whiteout(struct bset *i)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = false;
}

static void set_needs_whiteout(struct bset *i)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = true;
}

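/*
 * Bounce buffers for sorting and compacting: we first try a plain page
 * allocation (GFP_NOWAIT|__GFP_NOWARN, so it may fail under memory pressure)
 * and fall back to a preallocated mempool so a sort can always make forward
 * progress; btree_bounce_free() has to be told which path was taken.
 */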
static void btree_bounce_free(struct bch_fs *c, unsigned order,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		vpfree(p, PAGE_SIZE << order);
}

static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
				bool *used_mempool)
{
	void *p;

	BUG_ON(order > btree_page_order(c));

	*used_mempool = false;
	p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
	if (p)
		return p;

	*used_mempool = true;
	return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
}

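/*
 * sort_iter is a small fixed size merge iterator - one entry per bset being
 * merged (plus one for unwritten whiteouts), kept ordered by each entry's
 * head key so that sort_iter_next() returns keys in sorted order across all
 * inputs.
 */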
typedef int (*sort_cmp_fn)(struct btree *,
			   struct bkey_packed *,
			   struct bkey_packed *);

struct sort_iter {
	struct btree	*b;
	unsigned	used;

	struct sort_iter_set {
		struct bkey_packed *k, *end;
	} data[MAX_BSETS + 1];
};

static void sort_iter_init(struct sort_iter *iter, struct btree *b)
{
	memset(iter, 0, sizeof(*iter));
	iter->b = b;
}

static inline void __sort_iter_sift(struct sort_iter *iter,
				    unsigned from,
				    sort_cmp_fn cmp)
{
	unsigned i;

	for (i = from;
	     i + 1 < iter->used &&
	     cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
	     i++)
		swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
	__sort_iter_sift(iter, 0, cmp);
}

static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
	unsigned i = iter->used;

	while (i--)
		__sort_iter_sift(iter, i, cmp);
}

static void sort_iter_add(struct sort_iter *iter,
			  struct bkey_packed *k,
			  struct bkey_packed *end)
{
	BUG_ON(iter->used >= ARRAY_SIZE(iter->data));

	if (k != end)
		iter->data[iter->used++] = (struct sort_iter_set) { k, end };
}

static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
	return iter->used ? iter->data->k : NULL;
}

static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
	iter->data->k = bkey_next(iter->data->k);

	BUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		array_remove_item(iter->data, iter->used, 0);
	else
		sort_iter_sift(iter, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
						 sort_cmp_fn cmp)
{
	struct bkey_packed *ret = sort_iter_peek(iter);

	if (ret)
		sort_iter_advance(iter, cmp);

	return ret;
}

static inline int sort_key_whiteouts_cmp(struct btree *b,
					 struct bkey_packed *l,
					 struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r);
}

static unsigned sort_key_whiteouts(struct bkey_packed *dst,
				   struct sort_iter *iter)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_key_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

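/*
 * Extent whiteouts are compared by start position rather than by p (end
 * position), since sort_extent_whiteouts() below merges and trims overlapping
 * discards against each other.
 */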
static inline int sort_extent_whiteouts_cmp(struct btree *b,
					    struct bkey_packed *l,
					    struct bkey_packed *r)
{
	struct bkey ul = bkey_unpack_key(b, l);
	struct bkey ur = bkey_unpack_key(b, r);

	return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}

static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
				      struct sort_iter *iter)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *out = dst;
	struct bkey_i l, r;
	bool prev = false, l_packed = false;
	u64 max_packed_size	= bkey_field_max(f, BKEY_FIELD_SIZE);
	u64 max_packed_offset	= bkey_field_max(f, BKEY_FIELD_OFFSET);
	u64 new_size;

	max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

	sort_iter_sort(iter, sort_extent_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
		if (bkey_deleted(in))
			continue;

		EBUG_ON(bkeyp_val_u64s(f, in));
		EBUG_ON(in->type != KEY_TYPE_DISCARD);

		r.k = bkey_unpack_key(iter->b, in);

		if (prev &&
		    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			new_size = l_packed
				? min(max_packed_size, max_packed_offset -
				      bkey_start_offset(&l.k))
				: KEY_SIZE_MAX;

			new_size = min(new_size, r.k.p.offset -
				       bkey_start_offset(&l.k));

			BUG_ON(new_size < l.k.size);

			bch2_key_resize(&l.k, new_size);

			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			bch2_cut_front(l.k.p, &r);
		}

		if (prev) {
			if (!bch2_bkey_pack(out, &l, f)) {
				BUG_ON(l_packed);
				bkey_copy(out, &l);
			}
			out = bkey_next(out);
		}

		l = r;
		prev = true;
		l_packed = bkey_packed(in);
	}

	if (prev) {
		if (!bch2_bkey_pack(out, &l, f)) {
			BUG_ON(l_packed);
			bkey_copy(out, &l);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
				    bool compacting,
				    enum compact_mode mode)
{
	unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
	unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];

	if (mode == COMPACT_LAZY) {
		if (should_compact_bset_lazy(b, t) ||
		    (compacting && !bset_written(b, bset(b, t))))
			return dead_u64s;
	} else {
		if (bset_written(b, bset(b, t)))
			return dead_u64s;
	}

	return 0;
}

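/*
 * Compact the bsets picked out by should_compact_bset(): dead keys are
 * dropped, and whiteouts that still need to be written (because the keys they
 * delete are in bsets already on disk) are moved to the unwritten whiteout
 * area at the end of the node. Returns true if anything was compacted, i.e.
 * if iterators pointing into this node were invalidated.
 */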
bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			      enum compact_mode mode)
{
	const struct bkey_format *f = &b->format;
	struct bset_tree *t;
	struct bkey_packed *whiteouts = NULL;
	struct bkey_packed *u_start, *u_pos;
	struct sort_iter sort_iter;
	unsigned order, whiteout_u64s = 0, u64s;
	bool used_mempool, compacting = false;

	for_each_bset(b, t)
		whiteout_u64s += should_compact_bset(b, t,
					whiteout_u64s != 0, mode);

	if (!whiteout_u64s)
		return false;

	sort_iter_init(&sort_iter, b);

	whiteout_u64s += b->whiteout_u64s;
	order = get_order(whiteout_u64s * sizeof(u64));

	whiteouts = btree_bounce_alloc(c, order, &used_mempool);
	u_start = u_pos = whiteouts;

	memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
		    b->whiteout_u64s);
	u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);

	sort_iter_add(&sort_iter, u_start, u_pos);

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (!should_compact_bset(b, t, compacting, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		compacting = true;
		u_start = u_pos;
		start = i->start;
		end = vstruct_last(i);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (bkey_deleted(k) && btree_node_is_extents(b))
				continue;

			if (bkey_whiteout(k) && !k->needs_whiteout)
				continue;

			if (bkey_whiteout(k)) {
				unreserve_whiteout(b, k);
				memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
				set_bkeyp_val_u64s(f, u_pos, 0);
				u_pos = bkey_next(u_pos);
			} else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
				bkey_copy(out, k);
				out = bkey_next(out);
			}
		}

		sort_iter_add(&sort_iter, u_start, u_pos);

		if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
			i->u64s = cpu_to_le16((u64 *) out - i->_data);
			set_btree_bset_end(b, t);
			bch2_bset_set_no_aux_tree(b, t);
		}
	}

	b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;

	BUG_ON((void *) unwritten_whiteouts_start(c, b) <
	       (void *) btree_bkey_last(b, bset_tree_last(b)));

	u64s = btree_node_is_extents(b)
		? sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
					&sort_iter)
		: sort_key_whiteouts(unwritten_whiteouts_start(c, b),
				     &sort_iter);

	BUG_ON(u64s > b->whiteout_u64s);
	BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
	BUG_ON(u_pos != whiteouts && !u64s);

	if (u64s != b->whiteout_u64s) {
		void *src = unwritten_whiteouts_start(c, b);

		b->whiteout_u64s = u64s;
		memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
	}

	verify_no_dups(b,
		       unwritten_whiteouts_start(c, b),
		       unwritten_whiteouts_end(c, b));

	btree_bounce_free(c, order, used_mempool, whiteouts);

	if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
		bch2_btree_build_aux_trees(b);

	bch_btree_keys_u64s_remaining(c, b);
	bch2_verify_btree_nr_keys(b);

	return true;
}

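/*
 * Simpler counterpart used after a write: once every bset has been written
 * out, the whiteouts have served their purpose and can simply be dropped.
 */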
static bool bch2_drop_whiteouts(struct btree *b)
{
	struct bset_tree *t;
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;

		if (!should_compact_bset(b, t, true, COMPACT_WRITTEN))
			continue;

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (!bset_written(b, i) &&
		    t != b->set) {
			struct bset *dst =
				max_t(struct bset *, write_block(b),
				      (void *) btree_bkey_last(b, t - 1));

			memmove(dst, i, sizeof(struct bset));
			i = dst;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (!bkey_whiteout(k)) {
				bkey_copy(out, k);
				out = bkey_next(out);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);
	return ret;
}

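/*
 * Sort order for non-extent nodes: ties on position are broken so whiteouts
 * sort before other keys at the same position, letting sort_keys() fold a
 * whiteout's needs_whiteout flag into the key that follows it.
 */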
static inline int sort_keys_cmp(struct btree *b,
				struct bkey_packed *l,
				struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
		(int) l->needs_whiteout - (int) r->needs_whiteout;
}

static unsigned sort_keys(struct bkey_packed *dst,
			  struct sort_iter *iter,
			  bool filter_whiteouts)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *next, *out = dst;

	sort_iter_sort(iter, sort_keys_cmp);

	while ((in = sort_iter_next(iter, sort_keys_cmp))) {
		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		if (bkey_whiteout(in) &&
		    (next = sort_iter_peek(iter)) &&
		    !bkey_cmp_packed(iter->b, in, next)) {
			BUG_ON(in->needs_whiteout &&
			       next->needs_whiteout);
			/*
			 * XXX racy, called with read lock from write path
			 *
			 * leads to spurious BUG_ON() in bkey_unpack_key() in
			 * debug mode
			 */
			next->needs_whiteout |= in->needs_whiteout;
			continue;
		}

		if (bkey_whiteout(in)) {
			memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
			set_bkeyp_val_u64s(f, out, 0);
		} else {
			bkey_copy(out, in);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static inline int sort_extents_cmp(struct btree *b,
				   struct bkey_packed *l,
				   struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(l) - (int) bkey_deleted(r);
}

static unsigned sort_extents(struct bkey_packed *dst,
			     struct sort_iter *iter,
			     bool filter_whiteouts)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_extents_cmp);

	while ((in = sort_iter_next(iter, sort_extents_cmp))) {
		if (bkey_deleted(in))
			continue;

		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

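/*
 * Merge sort bsets [start_idx, end_idx) into a bounce buffer, then copy the
 * result back - or, when the entire node is being sorted, just swap buffers.
 * Aux search trees are invalidated here and rebuilt later by the caller.
 */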
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    struct btree_iter *iter,
			    unsigned start_idx,
			    unsigned end_idx,
			    bool filter_whiteouts)
{
	struct btree_node *out;
	struct sort_iter sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	order = sorting_entire_node
		? btree_page_order(c)
		: get_order(__vstruct_bytes(struct btree_node, u64s));

	out = btree_bounce_alloc(c, order, &used_mempool);

	start_time = local_clock();

	if (btree_node_is_extents(b))
		filter_whiteouts = bset_written(b, start_bset);

	u64s = btree_node_is_extents(b)
		? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
		: sort_keys(out->keys.start, &sort_iter, filter_whiteouts);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		unsigned u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(order != btree_page_order(c));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, order, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

/* Sort + repack in a new format: */
static struct btree_nr_keys sort_repack(struct bset *dst,
					struct btree *src,
					struct btree_node_iter *src_iter,
					struct bkey_format *out_f,
					bool filter_whiteouts)
{
	struct bkey_format *in_f = &src->format;
	struct bkey_packed *in, *out = vstruct_last(dst);
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
		if (filter_whiteouts && bkey_whiteout(in))
			continue;

		if (bch2_bkey_transform(out_f, out, bkey_packed(in)
					? in_f : &bch2_bkey_format_current, in))
			out->format = KEY_FORMAT_LOCAL_BTREE;
		else
			bch2_bkey_unpack(src, (void *) out, in);

		btree_keys_account_key_add(&nr, 0, out);
		out = bkey_next(out);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

/* Sort, repack, and merge: */
static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
					      struct bset *dst,
					      struct btree *src,
					      struct btree_node_iter *iter,
					      struct bkey_format *out_f,
					      bool filter_whiteouts,
					      key_filter_fn filter,
					      key_merge_fn merge)
{
	struct bkey_packed *k, *prev = NULL, *out;
	struct btree_nr_keys nr;
	BKEY_PADDED(k) tmp;

	memset(&nr, 0, sizeof(nr));

	while ((k = bch2_btree_node_iter_next_all(iter, src))) {
		if (filter_whiteouts && bkey_whiteout(k))
			continue;

		/*
		 * The filter might modify pointers, so we have to unpack the
		 * key and values to &tmp.k:
		 */
		bch2_bkey_unpack(src, &tmp.k, k);

		if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
			continue;

		/* prev is always unpacked, for key merging: */

		if (prev &&
		    merge &&
		    merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
			continue;

		/*
		 * the current key becomes the new prev: advance prev, then
		 * copy the current key - but first pack prev (in place):
		 */
		if (prev) {
			bch2_bkey_pack(prev, (void *) prev, out_f);

			btree_keys_account_key_add(&nr, 0, prev);
			prev = bkey_next(prev);
		} else {
			prev = vstruct_last(dst);
		}

		bkey_copy(prev, &tmp.k);
	}

	if (prev) {
		bch2_bkey_pack(prev, (void *) prev, out_f);
		btree_keys_account_key_add(&nr, 0, prev);
		out = bkey_next(prev);
	} else {
		out = vstruct_last(dst);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	if (btree_node_ops(src)->key_normalize ||
	    btree_node_ops(src)->key_merge)
		nr = sort_repack_merge(c, btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true,
				btree_node_ops(src)->key_normalize,
				btree_node_ops(src)->key_merge);
	else
		nr = sort_repack(btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_sort], start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

#define SORT_CRIT	(4096 / sizeof(u64))

/*
 * We're about to add another bset to the btree node, so if there's currently
 * too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b,
			       struct btree_iter *iter)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, iter, unwritten_idx,
				b->nsets, false);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, iter, 0, unwritten_idx, false);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * @bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new
 * bset if @b doesn't already have one.
 *
 * If we sorted (and thus invalidated iterators), @iter is re-initialized
 * against the node before returning.
 */
void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
			  struct btree_iter *iter)
{
	struct btree_node_entry *bne;
	bool did_sort;

	EBUG_ON(!(b->lock.state.seq & 1));
	EBUG_ON(iter && iter->l[b->level].b != b);

	did_sort = btree_node_compact(c, b, iter);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	if (iter && did_sort)
		bch2_btree_iter_reinit_node(iter, b);
}

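/*
 * The nonce for a bset mixes its sector offset within the node with the
 * bset's seq and journal_seq fields, so that no two bsets are encrypted with
 * the same keystream.
 */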
static struct nonce btree_nonce(struct bset *i, unsigned offset)
{
	return (struct nonce) {{
		[0] = cpu_to_le32(offset),
		[1] = ((__le32 *) &i->seq)[0],
		[2] = ((__le32 *) &i->seq)[1],
		[3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
	}};
}

static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
{
	struct nonce nonce = btree_nonce(i, offset);

	if (!offset) {
		struct btree_node *bn = container_of(i, struct btree_node, keys);
		unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;

		bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
			     bytes);

		nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
	}

	bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
		     vstruct_end(i) - (void *) i->_data);
}

static int btree_err_msg(struct bch_fs *c, struct btree *b, struct bset *i,
			 unsigned offset, int write, char *buf, size_t len)
{
	char *out = buf, *end = buf + len;

	out += scnprintf(out, end - out,
			 "error validating btree node %s"
			 "at btree %u level %u/%u\n"
			 "pos %llu:%llu node offset %u",
			 write ? "before write " : "",
			 b->btree_id, b->level,
			 c->btree_roots[b->btree_id].level,
			 b->key.k.p.inode, b->key.k.p.offset,
			 b->written);
	if (i)
		out += scnprintf(out, end - out,
				 " bset u64s %u",
				 le16_to_cpu(i->u64s));

	return out - buf;
}

enum btree_err_type {
	BTREE_ERR_FIXABLE,
	BTREE_ERR_WANT_RETRY,
	BTREE_ERR_MUST_RETRY,
	BTREE_ERR_FATAL,
};

enum btree_validate_ret {
	BTREE_RETRY_READ = 64,
};

#define btree_err(type, c, b, i, msg, ...)				\
({									\
	__label__ out;							\
	char _buf[300], *out = _buf, *end = out + sizeof(_buf);	\
									\
	out += btree_err_msg(c, b, i, b->written, write, out, end - out);\
	out += scnprintf(out, end - out, ": " msg, ##__VA_ARGS__);	\
									\
	if (type == BTREE_ERR_FIXABLE &&				\
	    write == READ &&						\
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {		\
		mustfix_fsck_err(c, "%s", _buf);			\
		goto out;						\
	}								\
									\
	switch (write) {						\
	case READ:							\
		bch_err(c, "%s", _buf);					\
									\
		switch (type) {						\
		case BTREE_ERR_FIXABLE:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		case BTREE_ERR_WANT_RETRY:				\
			if (have_retry) {				\
				ret = BTREE_RETRY_READ;			\
				goto fsck_err;				\
			}						\
			break;						\
		case BTREE_ERR_MUST_RETRY:				\
			ret = BTREE_RETRY_READ;				\
			goto fsck_err;					\
		case BTREE_ERR_FATAL:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write: %s", _buf);	\
									\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
out:									\
	true;								\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)

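/*
 * Validate a single bset, repairing what we can: FIXABLE errors truncate the
 * bset or drop the offending key. *whiteout_u64s is set to the length of the
 * initial run of separate whiteouts, which the read path sorts separately
 * from ordinary keys.
 */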
static int validate_bset(struct bch_fs *c, struct btree *b,
			 struct bset *i, unsigned sectors,
			 unsigned *whiteout_u64s, int write,
			 bool have_retry)
{
	struct bkey_packed *k, *prev = NULL;
	struct bpos prev_pos = POS_MIN;
	enum bkey_type type = btree_node_type(b);
	bool seen_non_whiteout = false;
	const char *err;
	int ret = 0;

	if (i == &b->data->keys) {
		/* These indicate that we read the wrong btree node: */
		btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect level");

		if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
			u64 *p = (u64 *) &b->data->ptr;

			*p = swab64(*p);
			bch2_bpos_swab(&b->data->min_key);
			bch2_bpos_swab(&b->data->max_key);
		}

		btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect max key");

		/* XXX: ideally we would be validating min_key too */
#if 0
		/*
		 * not correct anymore, due to btree node write error
		 * handling
		 *
		 * need to add b->data->seq to btree keys and verify
		 * against that
		 */
		btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
						  b->data->ptr),
			     BTREE_ERR_FATAL, c, b, i,
			     "incorrect backpointer");
#endif
		err = bch2_bkey_format_validate(&b->data->format);
		btree_err_on(err,
			     BTREE_ERR_FATAL, c, b, i,
			     "invalid bkey format: %s", err);
	}

	if (btree_err_on(le16_to_cpu(i->version) != BCACHE_BSET_VERSION,
			 BTREE_ERR_FIXABLE, c, b, i,
			 "unsupported bset version")) {
		i->version = cpu_to_le16(BCACHE_BSET_VERSION);
		i->u64s = 0;
		return 0;
	}

	if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
			 BTREE_ERR_FIXABLE, c, b, i,
			 "bset past end of btree node")) {
		i->u64s = 0;
		return 0;
	}

	btree_err_on(b->written && !i->u64s,
		     BTREE_ERR_FIXABLE, c, b, i,
		     "empty bset");

	if (!BSET_SEPARATE_WHITEOUTS(i)) {
		seen_non_whiteout = true;
		*whiteout_u64s = 0;
	}

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s_c u;
		struct bkey tmp;
		const char *invalid;

		if (btree_err_on(!k->u64s,
				 BTREE_ERR_FIXABLE, c, b, i,
				 "KEY_U64s 0: %zu bytes of metadata lost",
				 vstruct_end(i) - (void *) k)) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(bkey_next(k) > vstruct_last(i),
				 BTREE_ERR_FIXABLE, c, b, i,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 BTREE_ERR_FIXABLE, c, b, i,
				 "invalid bkey format %u", k->format)) {
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
			bch2_bkey_swab(type, &b->format, k);

		u = bkey_disassemble(b, k, &tmp);

		invalid = __bch2_bkey_invalid(c, type, u) ?:
			bch2_bkey_in_btree_node(b, u) ?:
			(write ? bch2_bkey_val_invalid(c, type, u) : NULL);
		if (invalid) {
			char buf[160];

			bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
			btree_err(BTREE_ERR_FIXABLE, c, b, i,
				  "invalid bkey:\n%s\n%s", invalid, buf);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		/*
		 * with the separate whiteouts thing (used for extents), the
		 * second set of keys actually can have whiteouts too, so we
		 * can't solely go off bkey_whiteout()...
		 */

		if (!seen_non_whiteout &&
		    (!bkey_whiteout(k) ||
		     (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
			*whiteout_u64s = k->_data - i->_data;
			seen_non_whiteout = true;
		} else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
			btree_err(BTREE_ERR_FATAL, c, b, i,
				  "keys out of order: %llu:%llu > %llu:%llu",
				  prev_pos.inode,
				  prev_pos.offset,
				  u.k->p.inode,
				  bkey_start_offset(u.k));
			/* XXX: repair this */
		}

		prev_pos = u.k->p;
		prev = k;
		k = bkey_next(k);
	}

	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
fsck_err:
	return ret;
}

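/*
 * Read side entry point: verify, decrypt and validate each bset in the node
 * in turn, then merge sort them all (fixing up any overlapping extents) into
 * a single bset in a bounce buffer, which is swapped in as the node's new
 * contents.
 */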
int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
{
	struct btree_node_entry *bne;
	struct btree_node_iter_large *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool;
	unsigned u64s;
	int ret, retry_read = 0, write = READ;

	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
	iter->used = 0;

	if (bch2_meta_read_fault("btree"))
		btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     BTREE_ERR_MUST_RETRY, c, b, NULL,
		     "bad magic");

	btree_err_on(!b->data->keys.seq,
		     BTREE_ERR_MUST_RETRY, c, b, NULL,
		     "bad btree header");

	while (b->written < c->opts.btree_node_size) {
		unsigned sectors, whiteout_u64s = 0;
		struct nonce nonce;
		struct bch_csum csum;
		bool first = !b->written;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "unknown checksum type");

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);

			btree_err_on(bch2_crc_cmp(csum, b->data->csum),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			sectors = vstruct_sectors(b->data, c->block_bits);

			btree_node_set_format(b, b->data->format);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "unknown checksum type");

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			btree_err_on(bch2_crc_cmp(csum, bne->csum),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
				    READ, have_retry);
		if (ret)
			goto fsck_err;

		b->written += sectors;

		ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
		if (ret < 0) {
			btree_err(BTREE_ERR_FATAL, c, b, i,
				  "insufficient memory");
			goto err;
		}

		if (ret) {
			btree_err_on(first,
				     BTREE_ERR_FIXABLE, c, b, i,
				     "first btree node bset has blacklisted journal seq");
			if (!first)
				continue;
		}

		bch2_btree_node_iter_large_push(iter, b,
					i->start,
					vstruct_idx(i, whiteout_u64s));

		bch2_btree_node_iter_large_push(iter, b,
					vstruct_idx(i, whiteout_u64s),
					vstruct_last(i));
	}

	for (bne = write_block(b);
	     bset_byte_offset(b, bne) < btree_bytes(c);
	     bne = (void *) bne + block_bytes(c))
		btree_err_on(bne->keys.seq == b->data->keys.seq,
			     BTREE_ERR_WANT_RETRY, c, b, NULL,
			     "found bset signature after last bset");

	sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = btree_node_is_extents(b)
		? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
		: bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		enum bkey_type type = btree_node_type(b);
		struct bkey tmp;
		struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
		const char *invalid = bch2_bkey_val_invalid(c, type, u);

		if (invalid ||
		    (inject_invalid_keys(c) &&
		     !bversion_cmp(u.k->version, MAX_VERSION))) {
			char buf[160];

			bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
			btree_err(BTREE_ERR_FIXABLE, c, b, i,
				  "invalid bkey %s: %s", buf, invalid);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}

		k = bkey_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b));

	btree_node_reset_sib_u64s(b);
out:
	mempool_free(iter, &c->fill_iter);
	return retry_read;
err:
fsck_err:
	if (ret == BTREE_RETRY_READ) {
		retry_read = 1;
	} else {
		bch2_inconsistent_error(c);
		set_btree_node_read_error(b);
	}
	goto out;
}

static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rb->pick.ptr.dev);
	struct btree *b		= rb->bio.bi_private;
	struct bio *bio		= &rb->bio;
	struct bch_devs_mask avoid;
	bool can_retry;

	memset(&avoid, 0, sizeof(avoid));

	goto start;
	while (1) {
		bch_info(c, "retrying read");
		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		rb->have_ioref = bch2_dev_get_ioref(ca, READ);
		bio_reset(bio);
		bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_bytes(c);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		bch2_dev_io_err_on(bio->bi_status, ca, "btree read");
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		__set_bit(rb->pick.ptr.dev, avoid.d);
		can_retry = bch2_btree_pick_ptr(c, b, &avoid, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, b, can_retry))
			break;

		if (!can_retry) {
			set_btree_node_read_error(b);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_read], rb->start_time);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(system_unbound_wq, &rb->work);
}

void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
			  bool sync)
{
	struct extent_pick_ptr pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_btree_read(c, b);

	ret = bch2_btree_pick_ptr(c, b, NULL, &pick);
	if (bch2_fs_fatal_err_on(ret <= 0, c,
			"btree node read error: no device to read from")) {
		set_btree_node_read_error(b);
		return;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->start_time		= local_clock();
	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_iter.bi_size	= btree_bytes(c);
	bio->bi_end_io		= btree_node_read_endio;
	bio->bi_private		= b;
	bch2_bio_map(bio, b->data);

	set_btree_node_read_in_flight(b);

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);

			bio->bi_private	= b;
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(system_unbound_wq, &rb->work);
	}
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(c);
	bch2_btree_cache_cannibalize_unlock(c);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	bch2_btree_node_read(c, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -EIO;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->lock);
	six_unlock_intent(&b->lock);

	return ret;
}

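/*
 * The low bit of will_make_reachable tracks whether the btree_update that
 * created this node still holds a closure ref on it; a completed write may be
 * the one that makes the node reachable, so that ref is dropped here.
 */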
void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
			       struct btree_write *w)
{
	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);

	do {
		old = new = v;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
	closure_wake_up(&w->wait);
}

static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);

	bch2_btree_complete_write(c, b, w);
	btree_node_io_unlock(b);
}

static void bch2_btree_node_write_error(struct bch_fs *c,
					struct btree_write_bio *wbio)
{
	struct btree *b		= wbio->wbio.bio.bi_private;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
	struct bkey_i_extent *new_key;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct btree_iter iter;
	int ret;

	__bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
			       BTREE_MAX_DEPTH,
			       b->level, BTREE_ITER_NODES);
retry:
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto err;

	/* has node been freed? */
	if (iter.l[b->level].b != b) {
		/* node has been freed: */
		BUG_ON(!btree_node_dying(b));
		goto out;
	}

	BUG_ON(!btree_node_hashed(b));

	bkey_copy(&tmp.k, &b->key);

	new_key = bkey_i_to_extent(&tmp.k);
	e = extent_i_to_s(new_key);
	extent_for_each_ptr_backwards(e, ptr)
		if (bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev))
			bch2_extent_drop_ptr(e, ptr);

	if (!bch2_extent_nr_ptrs(e.c))
		goto err;

	ret = bch2_btree_node_update_key(c, &iter, b, new_key);
	if (ret == -EINTR)
		goto retry;
	if (ret)
		goto err;
out:
	bch2_btree_iter_unlock(&iter);
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_error(c, "fatal error writing btree node");
	goto out;
}

void bch2_btree_write_error_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					btree_write_error_work);
	struct bio *bio;

	while (1) {
		spin_lock_irq(&c->btree_write_error_lock);
		bio = bio_list_pop(&c->btree_write_error_list);
		spin_unlock_irq(&c->btree_write_error_lock);

		if (!bio)
			break;

		bch2_btree_node_write_error(c,
			container_of(bio, struct btree_write_bio, wbio.bio));
	}
}

static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c	= wbio->wbio.c;
	struct btree *b		= wbio->wbio.bio.bi_private;

	btree_bounce_free(c,
		wbio->wbio.order,
		wbio->wbio.used_mempool,
		wbio->data);

	if (wbio->wbio.failed.nr) {
		unsigned long flags;

		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);

		queue_work(c->wq, &c->btree_write_error_work);
		return;
	}

	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig	= parent ?: wbio;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (bio->bi_status == BLK_STS_REMOVED ||
	    bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
	} else {
		struct btree_write_bio *wb =
			container_of(orig, struct btree_write_bio, wbio);

		INIT_WORK(&wb->work, btree_node_write_work);
		queue_work(system_unbound_wq, &wb->work);
	}
}

static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	const struct bch_extent_ptr *ptr;
	unsigned whiteout_u64s = 0;
	int ret;

	extent_for_each_ptr(bkey_i_to_s_c_extent(&b->key), ptr)
		break;

	ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
	if (ret)
		bch2_inconsistent_error(c);

	return ret;
}

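/*
 * Write out everything dirty in the node: whiteouts are compacted, all
 * unwritten bsets are merge sorted into one contiguous bset in a bounce
 * buffer, which is then checksummed (and optionally encrypted) and submitted
 * as a single write. The dirty bit dance below is what makes this safe to
 * call with only a read lock held.
 */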
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			     enum six_lock_type lock_type_held)
{
	struct btree_write_bio *wbio;
	struct bset_tree *t;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	BKEY_PADDED(key) k;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct sort_iter sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	void *data;

	if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
		return;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	do {
		old = new = READ_ONCE(b->flags);

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if (b->written &&
		    !btree_node_may_write(b))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight)) {
			btree_node_wait_on_io(b);
			continue;
		}

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	BUG_ON(btree_node_fake(b));
	BUG_ON(!list_empty(&b->write_blocked));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= c->opts.btree_node_size);
	BUG_ON(b->written & (c->opts.block_size - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	/*
	 * We can't block on six_lock_write() here; another thread might be
	 * trying to get a journal reservation with read locks held, and getting
	 * a journal reservation might be blocked on flushing the journal and
	 * doing btree writes:
	 */
	if (lock_type_held == SIX_LOCK_intent &&
	    six_trylock_write(&b->lock)) {
		__bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
		six_unlock_write(&b->lock);
	} else {
		__bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
	}

	BUG_ON(b->uncompacted_whiteout_u64s);

	sort_iter_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	order = get_order(bytes);
	data = btree_bounce_alloc(c, order, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq	= cpu_to_le64(seq);
	i->u64s		= 0;

	if (!btree_node_is_extents(b)) {
		sort_iter_add(&sort_iter,
			      unwritten_whiteouts_start(c, b),
			      unwritten_whiteouts_end(c, b));
		SET_BSET_SEPARATE_WHITEOUTS(i, false);
	} else {
		memcpy_u64s(i->start,
			    unwritten_whiteouts_start(c, b),
			    b->whiteout_u64s);
		i->u64s = cpu_to_le16(b->whiteout_u64s);
		SET_BSET_SEPARATE_WHITEOUTS(i, true);
	}

	b->whiteout_u64s = 0;

	u64s = btree_node_is_extents(b)
		? sort_extents(vstruct_last(i), &sort_iter, false)
		: sort_keys(i->start, &sort_iter, false);
	le16_add_cpu(&i->u64s, u64s);

	clear_needs_whiteout(i);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(BCACHE_BSET_VERSION);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	/* if we're going to be encrypting, check metadata validity first: */
	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	bset_encrypt(c, i, b->written << 9);

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_btree_write(b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(GFP_NOIO, 1 << order, &c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data			= data;
	wbio->wbio.order		= order;
	wbio->wbio.used_mempool		= used_mempool;
	wbio->wbio.bio.bi_opf		= REQ_OP_WRITE|REQ_META|REQ_FUA;
	wbio->wbio.bio.bi_iter.bi_size	= sectors_to_write << 9;
	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
	wbio->wbio.bio.bi_private	= b;

	bch2_bio_map(&wbio->wbio.bio, data);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	e = bkey_i_to_s_extent(&k.key);

	extent_for_each_ptr(e, ptr)
		ptr->offset += b->written;

	b->written += sectors_to_write;

	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, order, used_mempool, data);
	btree_node_write_done(c, b);
}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;
	struct bset_tree *t;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);
	BUG_ON(b->uncompacted_whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, NULL, 0, b->nsets, true);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t));

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held)
{
	BUG_ON(lock_type_held == SIX_LOCK_write);

	if (lock_type_held == SIX_LOCK_intent ||
	    six_lock_tryupgrade(&b->lock)) {
		__bch2_btree_node_write(c, b, SIX_LOCK_intent);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->lock);
	} else {
		__bch2_btree_node_write(c, b, SIX_LOCK_read);
	}
}

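/*
 * Wait for all in flight reads or writes on cached btree nodes to complete,
 * restarting the rhashtable walk after each wait since the RCU read lock was
 * dropped.
 */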
static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			goto restart;
		}
	rcu_read_unlock();
}

void bch2_btree_flush_all_reads(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

void bch2_btree_flush_all_writes(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}

void bch2_btree_verify_flushed(struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos) {
		unsigned long flags = READ_ONCE(b->flags);

		BUG_ON((flags & (1 << BTREE_NODE_dirty)) ||
		       (flags & (1 << BTREE_NODE_write_in_flight)));
	}
	rcu_read_unlock();
}

ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
{
	char *out = buf, *end = buf + PAGE_SIZE;
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos) {
		unsigned long flags = READ_ONCE(b->flags);
		unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;

		if (//!(flags & (1 << BTREE_NODE_dirty)) &&
		    !b->writes[0].wait.list.first &&
		    !b->writes[1].wait.list.first &&
		    !(b->will_make_reachable & 1))
			continue;

		out += scnprintf(out, end - out, "%p d %u l %u w %u b %u r %u:%lu c %u p %u\n",
			b,
			(flags & (1 << BTREE_NODE_dirty)) != 0,
			b->level,
			b->written,
			!list_empty_careful(&b->write_blocked),
			b->will_make_reachable != 0,
			b->will_make_reachable & 1,
			b->writes[ idx].wait.list.first != NULL,
			b->writes[!idx].wait.list.first != NULL);
	}
	rcu_read_unlock();

	return out - buf;
}