/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "dirent.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "util.h"
#include "xattr.h"

#include <trace/events/bcachefs.h>
static void sort_key_next(struct btree_node_iter_large *iter,
			  struct btree *b,
			  struct btree_node_iter_set *i)
{
	i->k += __btree_node_offset_to_key(b, i->k)->u64s;

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}
/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
#define key_sort_cmp(h, l, r)						\
({									\
	bkey_cmp_packed(b,						\
			__btree_node_offset_to_key(b, (l).k),		\
			__btree_node_offset_to_key(b, (r).k))		\
	?: (l).k - (r).k;						\
})
static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
					struct btree *b)
{
	struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
	struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);

	if (bkey_whiteout(k))
		return true;

	if (iter->used < 2)
		return false;

	if (iter->used > 2 &&
	    key_sort_cmp(iter, r[0], r[1]) >= 0)
		r++;

	/*
	 * key_sort_cmp() ensures that when keys compare equal the older key
	 * comes first; so if l->k compares equal to r->k then l->k is older and
	 * should be dropped.
	 */
	return !bkey_cmp_packed(b,
				__btree_node_offset_to_key(b, l->k),
				__btree_node_offset_to_key(b, r->k));
}
struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
						   struct btree *b,
						   struct btree_node_iter_large *iter)
{
	struct bkey_packed *out = dst->start;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, key_sort_cmp);

	while (!bch2_btree_node_iter_large_end(iter)) {
		if (!should_drop_next_key(iter, b)) {
			struct bkey_packed *k =
				__btree_node_offset_to_key(b, iter->data->k);

			bkey_copy(out, k);
			btree_keys_account_key_add(&nr, 0, out);
			out = bkey_next(out);
		}

		sort_key_next(iter, b, iter->data);
		heap_sift_down(iter, 0, key_sort_cmp);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}
/* Common among btree and extent ptrs */

const struct bch_extent_ptr *
bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
{
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}
bool bch2_extent_drop_device(struct bkey_s_extent e, unsigned dev)
{
	struct bch_extent_ptr *ptr;
	bool dropped = false;

	extent_for_each_ptr_backwards(e, ptr)
		if (ptr->dev == dev) {
			__bch2_extent_drop_ptr(e, ptr);
			dropped = true;
		}

	if (dropped)
		bch2_extent_drop_redundant_crcs(e);
	return dropped;
}
const struct bch_extent_ptr *
bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
{
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ca->mi.group &&
		    ca->mi.group - 1 == group)
			return ptr;
	}

	return NULL;
}
const struct bch_extent_ptr *
bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
{
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return ptr;

	return NULL;
}
unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
{
	const struct bch_extent_ptr *ptr;
	unsigned nr_ptrs = 0;

	extent_for_each_ptr(e, ptr)
		nr_ptrs++;

	return nr_ptrs;
}
unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
{
	struct bkey_s_c_extent e;
	const struct bch_extent_ptr *ptr;
	unsigned nr_ptrs = 0;

	switch (k.k->type) {
	case BCH_EXTENT:
	case BCH_EXTENT_CACHED:
		e = bkey_s_c_to_extent(k);

		extent_for_each_ptr(e, ptr)
			nr_ptrs += !ptr->cached;
		break;

	case BCH_RESERVATION:
		nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
		break;
	}

	return nr_ptrs;
}
unsigned bch2_extent_ptr_durability(struct bch_fs *c,
				    const struct bch_extent_ptr *ptr)
{
	struct bch_dev *ca;

	if (ptr->cached)
		return 0;

	ca = bch_dev_bkey_exists(c, ptr->dev);

	if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
		return 0;

	return ca->mi.durability;
}
unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
{
	const struct bch_extent_ptr *ptr;
	unsigned durability = 0;

	extent_for_each_ptr(e, ptr)
		durability += bch2_extent_ptr_durability(c, ptr);

	return durability;
}
unsigned bch2_extent_is_compressed(struct bkey_s_c k)
{
	struct bkey_s_c_extent e;
	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;
	unsigned ret = 0;

	switch (k.k->type) {
	case BCH_EXTENT:
	case BCH_EXTENT_CACHED:
		e = bkey_s_c_to_extent(k);

		extent_for_each_ptr_crc(e, ptr, crc)
			if (!ptr->cached &&
			    crc.compression_type != BCH_COMPRESSION_NONE &&
			    crc.compressed_size < crc.live_size)
				ret = max_t(unsigned, ret, crc.compressed_size);
	}

	return ret;
}
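
/*
 * Example: for an extent with one dirty pointer whose crc entry has
 * compressed_size 32 and live_size 64, bch2_extent_is_compressed() returns
 * 32 (sectors). extent_insert_check_split_compressed() below uses this to
 * size the disk reservation needed when an overwrite splits a compressed
 * extent.
 */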
bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
			     struct bch_extent_ptr m, u64 offset)
{
	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;

	extent_for_each_ptr_crc(e, ptr, crc)
		if (ptr->dev == m.dev &&
		    ptr->gen == m.gen &&
		    (s64) ptr->offset + crc.offset - bkey_start_offset(e.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}
/* Doesn't cleanup redundant crcs */
void __bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr)
{
	EBUG_ON(ptr < &e.v->start->ptr ||
		ptr >= &extent_entry_last(e)->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
	memmove_u64s_down(ptr, ptr + 1,
			  (u64 *) extent_entry_last(e) - (u64 *) (ptr + 1));
	e.k->u64s -= sizeof(*ptr) / sizeof(u64);
}
void bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr)
{
	__bch2_extent_drop_ptr(e, ptr);
	bch2_extent_drop_redundant_crcs(e);
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !u.compression_type &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}
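
/*
 * Worked example: a checksummed, uncompressed extent was partially
 * overwritten, so its crc entry still covers uncompressed_size 128 sectors
 * while only live_size 64 remain live. Narrowing to a new checksum covering
 * just the live 64 sectors means readers no longer have to read (and bounce)
 * the dead sectors in order to verify the checksum.
 */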
bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
				 struct bch_extent_crc_unpacked n)
{
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	extent_for_each_crc(e, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}
/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
			     struct bch_extent_crc_unpacked n)
{
	struct bch_extent_crc_unpacked u;
	struct bch_extent_ptr *ptr;
	union bch_extent_entry *i;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type)
		extent_for_each_crc(extent_i_to_s(e), u, i)
			if (!u.compression_type &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				break;
			}

	if (!bch2_can_narrow_extent_crcs(extent_i_to_s_c(e), n))
		return false;

	BUG_ON(n.compression_type);
	BUG_ON(n.offset);
	BUG_ON(n.live_size != e->k.size);

	bch2_extent_crc_append(e, n);
restart_narrow_pointers:
	extent_for_each_ptr_crc(extent_i_to_s(e), ptr, u)
		if (can_narrow_crc(u, n)) {
			ptr->offset += u.offset;
			extent_ptr_append(e, *ptr);
			__bch2_extent_drop_ptr(extent_i_to_s(e), ptr);
			goto restart_narrow_pointers;
		}

	bch2_extent_drop_redundant_crcs(extent_i_to_s(e));
	return true;
}
/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type != r.csum_type ||
		l.compression_type != r.compression_type ||
		l.compressed_size != r.compressed_size ||
		l.uncompressed_size != r.uncompressed_size ||
		l.offset != r.offset ||
		l.live_size != r.live_size ||
		l.nonce != r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}
void bch2_extent_drop_redundant_crcs(struct bkey_s_extent e)
{
	union bch_extent_entry *entry = e.v->start;
	union bch_extent_crc *crc, *prev = NULL;
	struct bch_extent_crc_unpacked u, prev_u = { 0 };

	while (entry != extent_entry_last(e)) {
		union bch_extent_entry *next = extent_entry_next(entry);
		size_t crc_u64s = extent_entry_u64s(entry);

		if (!extent_entry_is_crc(entry))
			goto next;

		crc = entry_to_crc(entry);
		u = bch2_extent_crc_unpack(e.k, crc);

		if (next == extent_entry_last(e)) {
			/* crc entry with no pointers after it: */
			goto drop;
		}

		if (extent_entry_is_crc(next)) {
			/* no pointers before next crc entry: */
			goto drop;
		}

		if (prev && !bch2_crc_unpacked_cmp(u, prev_u)) {
			/* identical to previous crc entry: */
			goto drop;
		}

		if (!prev &&
		    !u.csum_type &&
		    !u.compression_type) {
			/* null crc entry: */
			union bch_extent_entry *e2;

			extent_for_each_entry_from(e, e2, extent_entry_next(entry)) {
				if (!extent_entry_is_ptr(e2))
					break;

				e2->ptr.offset += u.offset;
			}
			goto drop;
		}

		prev = crc;
		prev_u = u;
next:
		entry = next;
		continue;
drop:
		memmove_u64s_down(crc, next,
				  (u64 *) extent_entry_last(e) - (u64 *) next);
		e.k->u64s -= crc_u64s;
	}

	EBUG_ON(bkey_val_u64s(e.k) && !bch2_extent_nr_ptrs(e.c));
}
static bool should_drop_ptr(const struct bch_fs *c,
			    struct bkey_s_c_extent e,
			    const struct bch_extent_ptr *ptr)
{
	return ptr->cached && ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr);
}
static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
{
	struct bch_extent_ptr *ptr = &e.v->start->ptr;
	bool dropped = false;

	while ((ptr = extent_ptr_next(e, ptr)))
		if (should_drop_ptr(c, e.c, ptr)) {
			__bch2_extent_drop_ptr(e, ptr);
			dropped = true;
		} else
			ptr++;

	if (dropped)
		bch2_extent_drop_redundant_crcs(e);
}
bool bch2_ptr_normalize(struct bch_fs *c, struct btree *b, struct bkey_s k)
{
	return bch2_extent_normalize(c, k);
}
void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
{
	switch (k->type) {
	case BCH_EXTENT:
	case BCH_EXTENT_CACHED: {
		union bch_extent_entry *entry;
		u64 *d = (u64 *) bkeyp_val(f, k);
		unsigned i;

		for (i = 0; i < bkeyp_val_u64s(f, k); i++)
			d[i] = swab64(d[i]);

		for (entry = (union bch_extent_entry *) d;
		     entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
		     entry = extent_entry_next(entry)) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.csum = swab32(entry->crc32.csum);
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
				entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.csum.hi = (__force __le64)
					swab64((__force u64) entry->crc128.csum.hi);
				entry->crc128.csum.lo = (__force __le64)
					swab64((__force u64) entry->crc128.csum.lo);
				break;
			case BCH_EXTENT_ENTRY_ptr:
				break;
			}
		}
		break;
	}
	}
}
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c_extent e,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (ptr->dev >= c->sb.nr_devices ||
	    !c->devs[ptr->dev])
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	extent_for_each_ptr(e, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}
static size_t extent_print_ptrs(struct bch_fs *c, char *buf,
				size_t size, struct bkey_s_c_extent e)
{
	char *out = buf, *end = buf + size;
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	struct bch_dev *ca;
	bool first = true;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	extent_for_each_entry(e, entry) {
		if (!first)
			p(" ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));

			p("crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			  crc.compressed_size,
			  crc.uncompressed_size,
			  crc.offset, crc.nonce,
			  crc.csum_type,
			  crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			p("ptr: %u:%llu gen %u%s%s", ptr->dev,
			  (u64) ptr->offset, ptr->gen,
			  ptr->cached ? " cached" : "",
			  ca && ptr_stale(ca, ptr)
			  ? " stale" : "");
			break;
		default:
			p("(invalid extent entry %.16llx)", *((u64 *) entry));
			return out - buf;
		}

		first = false;
	}

	if (bkey_extent_is_cached(e.k))
		p(" cached");
#undef p
	return out - buf;
}
static inline bool dev_latency_better(struct bch_fs *c,
				      const struct bch_extent_ptr *ptr1,
				      const struct bch_extent_ptr *ptr2)
{
	struct bch_dev *dev1 = bch_dev_bkey_exists(c, ptr1->dev);
	struct bch_dev *dev2 = bch_dev_bkey_exists(c, ptr2->dev);
	u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
	u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

	/* Pick at random, biased in favor of the faster device: */

	return bch2_rand_range(l1 + l2) > l1;
}
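
/*
 * Example: with l1 = 1ms and l2 = 3ms, bch2_rand_range(4ms) exceeds l1 with
 * probability 3/4, so the device behind ptr1 (the faster one) is preferred
 * three times out of four.
 */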
static int extent_pick_read_device(struct bch_fs *c,
				   struct bkey_s_c_extent e,
				   struct bch_devs_mask *avoid,
				   struct extent_pick_ptr *pick)
{
	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;
	struct bch_dev *ca;
	int ret = 0;

	extent_for_each_ptr_crc(e, ptr, crc) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ptr->cached && ptr_stale(ca, ptr))
			continue;

		if (avoid && test_bit(ptr->dev, avoid->d))
			continue;

		if (ret && !dev_latency_better(c, ptr, &pick->ptr))
			continue;

		*pick = (struct extent_pick_ptr) {
			.ptr	= *ptr,
			.crc	= crc,
		};

		ret = 1;
	}

	return ret;
}
/* Btree ptrs */

const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_extent_is_cached(k.k))
		return "cached";

	if (k.k->size)
		return "nonzero key size";

	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	switch (k.k->type) {
	case BCH_EXTENT: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;
		const struct bch_extent_ptr *ptr;
		const char *reason;

		extent_for_each_entry(e, entry) {
			if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
				return "invalid extent entry type";

			if (extent_entry_is_crc(entry))
				return "has crc field";
		}

		extent_for_each_ptr(e, ptr) {
			reason = extent_ptr_invalid(c, e, ptr,
						    c->opts.btree_node_size,
						    true);
			if (reason)
				return reason;
		}

		return NULL;
	}

	default:
		return "invalid value type";
	}
}
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
			       struct bkey_s_c k)
{
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const struct bch_extent_ptr *ptr;
	unsigned seq;
	const char *err;
	char buf[160];
	struct bucket_mark mark;
	struct bch_dev *ca;
	unsigned replicas = 0;
	bool bad;

	extent_for_each_ptr(e, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		replicas++;

		if (!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags))
			continue;

		err = "stale";
		if (ptr_stale(ca, ptr))
			goto err;

		do {
			seq = read_seqcount_begin(&c->gc_pos_lock);
			mark = ptr_bucket_mark(ca, ptr);

			bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
				(mark.data_type != BCH_DATA_BTREE ||
				 mark.dirty_sectors < c->opts.btree_node_size);
		} while (read_seqcount_retry(&c->gc_pos_lock, seq));

		err = "inconsistent";
		if (bad)
			goto err;
	}

	if (!bch2_bkey_replicas_marked(c, BCH_DATA_BTREE, e.s_c)) {
		bch2_bkey_val_to_text(c, btree_node_type(b),
				      buf, sizeof(buf), k);
		bch2_fs_bug(c,
			"btree key bad (replicas not marked in superblock):\n%s",
			buf);
		return;
	}

	return;
err:
	bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
	bch2_fs_bug(c, "%s btree pointer %s: bucket %zi "
		    "gen %i mark %08x",
		    err, buf, PTR_BUCKET_NR(ca, ptr),
		    mark.gen, (unsigned) mark.v.counter);
}
void bch2_btree_ptr_to_text(struct bch_fs *c, char *buf,
			    size_t size, struct bkey_s_c k)
{
	char *out = buf, *end = buf + size;
	const char *invalid;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	if (bkey_extent_is_data(k.k))
		out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));

	invalid = bch2_btree_ptr_invalid(c, k);
	if (invalid)
		p(" invalid: %s", invalid);
#undef p
}
int bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b,
			struct bch_devs_mask *avoid,
			struct extent_pick_ptr *pick)
{
	return extent_pick_read_device(c, bkey_i_to_s_c_extent(&b->key),
				       avoid, pick);
}

/* Extents */
static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
{
	u64 len = 0;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return false;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	len = k.k->p.offset - where.offset;

	BUG_ON(len > k.k->size);

	/*
	 * Don't readjust offset if the key size is now 0, because that could
	 * cause offset to point to the next bucket:
	 */
	if (!len)
		__set_bkey_deleted(k.k);
	else if (bkey_extent_is_data(k.k)) {
		struct bkey_s_extent e = bkey_s_to_extent(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		extent_for_each_entry(e, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += e.k->size - len;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += e.k->size - len;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += e.k->size - len;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += e.k->size - len;
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}
	}

	k.k->size = len;

	return true;
}
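
/*
 * Worked example: an extent with p.offset 8 and size 8 covers [0, 8); say it
 * has a bare pointer at offset 100. Cutting its front at offset 2 gives
 * len = 8 - 2 = 6, so the pointer's offset is bumped by e.k->size - len = 2,
 * to 102 - the first two sectors of the value are dropped. When a crc entry
 * precedes the pointer, the crc entry's offset field absorbs the shift
 * instead, since the checksummed region on disk is unchanged.
 */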
bool bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	return __bch2_cut_front(where, bkey_i_to_s(k));
}
bool bch2_cut_back(struct bpos where, struct bkey *k)
{
	u64 len = 0;

	if (bkey_cmp(where, k->p) >= 0)
		return false;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);

	len = where.offset - bkey_start_offset(k);

	BUG_ON(len > k->size);

	k->p = where;
	k->size = len;

	if (!len)
		__set_bkey_deleted(k);

	return true;
}
/**
 * bch2_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) will be preserved, modifies where the extent ends
 */
void bch2_key_resize(struct bkey *k,
		     unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}
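
/*
 * Example: a key with p.offset 16 and size 8 starts at offset 8; resizing it
 * to 4 yields p.offset 12, keeping bkey_start_offset() at 8.
 */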
/*
 * In extent_sort_fix_overlapping(), insert_fixup_extent(),
 * extent_merge_inline() - we're modifying keys in place that are packed. To do
 * that we have to unpack the key, modify the unpacked key - then this
 * copies/repacks the unpacked to the original as necessary.
 */
static bool __extent_save(struct btree *b, struct btree_node_iter *iter,
			  struct bkey_packed *dst, struct bkey *src)
{
	struct bkey_format *f = &b->format;
	struct bkey_i *dst_unpacked;
	bool ret;

	if ((dst_unpacked = packed_to_bkey(dst))) {
		dst_unpacked->k = *src;
		ret = true;
	} else {
		ret = bch2_bkey_pack_key(dst, src, f);
	}

	if (ret && iter)
		bch2_verify_key_order(b, iter, dst);

	return ret;
}
static void extent_save(struct btree *b, struct btree_node_iter *iter,
			struct bkey_packed *dst, struct bkey *src)
{
	BUG_ON(!__extent_save(b, iter, dst, src));
}
/*
 * If keys compare equal, compare by pointer order:
 *
 * Necessary for sort_fix_overlapping() - if there are multiple keys that
 * compare equal in different sets, we have to process them newest to oldest.
 */
#define extent_sort_cmp(h, l, r)					\
({									\
	struct bkey _ul = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (l).k));	\
	struct bkey _ur = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (r).k));	\
									\
	bkey_cmp(bkey_start_pos(&_ul),					\
		 bkey_start_pos(&_ur)) ?: (r).k - (l).k;		\
})
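
/*
 * The (r).k - (l).k tiebreak orders keys with equal start positions by their
 * offset within the node: bsets are appended one after the other, so the key
 * at the higher offset is the newer one, and it sorts first. This is what
 * lets bch2_extent_sort_fix_overlapping() below resolve overlaps in favor of
 * the key at the higher offset.
 */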
static inline void extent_sort_sift(struct btree_node_iter_large *iter,
				    struct btree *b, size_t i)
{
	heap_sift_down(iter, i, extent_sort_cmp);
}
static inline void extent_sort_next(struct btree_node_iter_large *iter,
				    struct btree *b,
				    struct btree_node_iter_set *i)
{
	sort_key_next(iter, b, i);
	heap_sift_down(iter, i - iter->data, extent_sort_cmp);
}
static void extent_sort_append(struct bch_fs *c,
			       struct btree *b,
			       struct btree_nr_keys *nr,
			       struct bkey_packed *start,
			       struct bkey_packed **prev,
			       struct bkey_packed *k)
{
	struct bkey_format *f = &b->format;
	BKEY_PADDED(k) tmp;

	if (bkey_whiteout(k))
		return;

	bch2_bkey_unpack(b, &tmp.k, k);

	if (*prev &&
	    bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
		return;

	if (*prev) {
		bch2_bkey_pack(*prev, (void *) *prev, f);

		btree_keys_account_key_add(nr, 0, *prev);
		*prev = bkey_next(*prev);
	} else {
		*prev = start;
	}

	bkey_copy(*prev, &tmp.k);
}
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
					struct bset *dst,
					struct btree *b,
					struct btree_node_iter_large *iter)
{
	struct bkey_format *f = &b->format;
	struct btree_node_iter_set *_l = iter->data, *_r;
	struct bkey_packed *prev = NULL, *out, *lk, *rk;
	struct bkey l_unpacked, r_unpacked;
	struct bkey_s l, r;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, extent_sort_cmp);

	while (!bch2_btree_node_iter_large_end(iter)) {
		lk = __btree_node_offset_to_key(b, _l->k);

		if (iter->used == 1) {
			extent_sort_append(c, b, &nr, dst->start, &prev, lk);
			extent_sort_next(iter, b, _l);
			continue;
		}

		_r = iter->data + 1;
		if (iter->used > 2 &&
		    extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
			_r++;

		rk = __btree_node_offset_to_key(b, _r->k);

		l = __bkey_disassemble(b, lk, &l_unpacked);
		r = __bkey_disassemble(b, rk, &r_unpacked);

		/* If current key and next key don't overlap, just append */
		if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
			extent_sort_append(c, b, &nr, dst->start, &prev, lk);
			extent_sort_next(iter, b, _l);
			continue;
		}

		/* Skip 0 size keys */
		if (!r.k->size) {
			extent_sort_next(iter, b, _r);
			continue;
		}

		/*
		 * overlap: keep the newer key and trim the older key so they
		 * don't overlap. comparing pointers tells us which one is
		 * newer, since the bsets are appended one after the other.
		 */

		/* can't happen because of comparison func */
		BUG_ON(_l->k < _r->k &&
		       !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));

		if (_l->k > _r->k) {
			/* l wins, trim r */
			if (bkey_cmp(l.k->p, r.k->p) >= 0) {
				sort_key_next(iter, b, _r);
			} else {
				__bch2_cut_front(l.k->p, r);
				extent_save(b, NULL, rk, r.k);
			}

			extent_sort_sift(iter, b, _r - iter->data);
		} else if (bkey_cmp(l.k->p, r.k->p) > 0) {
			BKEY_PADDED(k) tmp;

			/*
			 * r wins, but it overlaps in the middle of l - split l:
			 */
			bkey_reassemble(&tmp.k, l.s_c);
			bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);

			__bch2_cut_front(r.k->p, l);
			extent_save(b, NULL, lk, l.k);

			extent_sort_sift(iter, b, 0);

			extent_sort_append(c, b, &nr, dst->start, &prev,
					   bkey_to_packed(&tmp.k));
		} else {
			bch2_cut_back(bkey_start_pos(r.k), l.k);
			extent_save(b, NULL, lk, l.k);
		}
	}

	if (prev) {
		bch2_bkey_pack(prev, (void *) prev, f);
		btree_keys_account_key_add(&nr, 0, prev);
		out = bkey_next(prev);
	} else {
		out = dst->start;
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}
struct extent_insert_state {
	struct btree_insert		*trans;
	struct btree_insert_entry	*insert;
	struct bpos			committed;
	struct bch_fs_usage		stats;

	/* for deleting: */
	struct bkey_i			whiteout;
	bool				do_journal;
	bool				deleting;
};
static void bch2_add_sectors(struct extent_insert_state *s,
			     struct bkey_s_c k, u64 offset, s64 sectors)
{
	struct bch_fs *c = s->trans->c;
	struct btree *b = s->insert->iter->l[0].b;

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);

	if (!sectors)
		return;

	bch2_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
		      &s->stats, s->trans->journal_res.seq, 0);
}
static void bch2_subtract_sectors(struct extent_insert_state *s,
				  struct bkey_s_c k, u64 offset, s64 sectors)
{
	bch2_add_sectors(s, k, offset, -sectors);
}

/* These wrappers subtract exactly the sectors that we're removing from @k */
static void bch2_cut_subtract_back(struct extent_insert_state *s,
				   struct bpos where, struct bkey_s k)
{
	bch2_subtract_sectors(s, k.s_c, where.offset,
			      k.k->p.offset - where.offset);
	bch2_cut_back(where, k.k);
}

static void bch2_cut_subtract_front(struct extent_insert_state *s,
				    struct bpos where, struct bkey_s k)
{
	bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
			      where.offset - bkey_start_offset(k.k));
	__bch2_cut_front(where, k);
}

static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
{
	if (k.k->size)
		bch2_subtract_sectors(s, k.s_c,
				      bkey_start_offset(k.k), k.k->size);
	k.k->size = 0;
	__set_bkey_deleted(k.k);
}
static bool bch2_extent_merge_inline(struct bch_fs *,
				     struct btree_iter *,
				     struct bkey_packed *,
				     struct bkey_packed *,
				     bool);

#define MAX_LOCK_HOLD_TIME	(5 * NSEC_PER_MSEC)
static enum btree_insert_ret
extent_insert_should_stop(struct extent_insert_state *s)
{
	struct btree *b = s->insert->iter->l[0].b;

	/*
	 * Check if we have sufficient space in both the btree node and the
	 * journal reservation:
	 *
	 * Each insert checks for room in the journal entry, but we check for
	 * room in the btree node up-front. In the worst case, bkey_cmpxchg()
	 * will insert two keys, and one iteration of this loop will insert one
	 * key, so we need room for three keys.
	 */
	if (!bch2_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
		return BTREE_INSERT_BTREE_NODE_FULL;
	else if (!journal_res_insert_fits(s->trans, s->insert))
		return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */
	else
		return BTREE_INSERT_OK;
}
static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
			       struct bkey_i *insert)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bset_tree *t = bset_tree_last(l->b);
	struct bkey_packed *where =
		bch2_btree_node_iter_bset_pos(&l->iter, l->b, t);
	struct bkey_packed *prev = bch2_bkey_prev(l->b, t, where);
	struct bkey_packed *next_live_key = where;
	unsigned clobber_u64s;

	if (prev)
		where = bkey_next(prev);

	while (next_live_key != btree_bkey_last(l->b, t) &&
	       bkey_deleted(next_live_key))
		next_live_key = bkey_next(next_live_key);

	/*
	 * Everything between where and next_live_key is now deleted keys, and
	 * is overwritten:
	 */
	clobber_u64s = (u64 *) next_live_key - (u64 *) where;

	if (prev &&
	    bch2_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
		goto drop_deleted_keys;

	if (next_live_key != btree_bkey_last(l->b, t) &&
	    bch2_extent_merge_inline(c, iter, bkey_to_packed(insert),
				     next_live_key, false))
		goto drop_deleted_keys;

	bch2_bset_insert(l->b, &l->iter, where, insert, clobber_u64s);
	bch2_btree_node_iter_fix(iter, l->b, &l->iter, t, where,
				 clobber_u64s, where->u64s);
	return;
drop_deleted_keys:
	bch2_bset_delete(l->b, where, clobber_u64s);
	bch2_btree_node_iter_fix(iter, l->b, &l->iter, t,
				 where, clobber_u64s, 0);
}
static void extent_insert_committed(struct extent_insert_state *s)
{
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct bkey_i *insert = !s->deleting
		? s->insert->k
		: &s->whiteout;
	BKEY_PADDED(k) split;

	EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
	EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);

	if (!bkey_cmp(s->committed, bkey_start_pos(&insert->k)))
		return;

	if (s->deleting && !s->do_journal) {
		bch2_cut_front(s->committed, insert);
		goto done;
	}

	EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);

	bkey_copy(&split.k, insert);

	if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY) &&
	    bkey_cmp(s->committed, insert->k.p) &&
	    bch2_extent_is_compressed(bkey_i_to_s_c(insert))) {
		/* XXX: possibly need to increase our reservation? */
		bch2_cut_subtract_back(s, s->committed,
				       bkey_i_to_s(&split.k));
		bch2_cut_front(s->committed, insert);
		bch2_add_sectors(s, bkey_i_to_s_c(insert),
				 bkey_start_offset(&insert->k),
				 insert->k.size);
	} else {
		bch2_cut_back(s->committed, &split.k.k);
		bch2_cut_front(s->committed, insert);
	}

	if (debug_check_bkeys(c))
		bch2_bkey_debugcheck(c, iter->l[0].b, bkey_i_to_s_c(&split.k));

	bch2_btree_journal_key(s->trans, iter, &split.k);

	if (!s->deleting)
		extent_bset_insert(c, iter, &split.k);
done:
	bch2_btree_iter_set_pos_same_leaf(iter, s->committed);

	insert->k.needs_whiteout	= false;
	s->do_journal			= false;
	s->trans->did_work		= true;
}
static enum btree_insert_ret
__extent_insert_advance_pos(struct extent_insert_state *s,
			    struct bpos next_pos,
			    struct bkey_s_c k)
{
	struct extent_insert_hook *hook = s->trans->hook;
	enum btree_insert_ret ret;

	if (hook)
		ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
	else
		ret = BTREE_INSERT_OK;

	EBUG_ON(bkey_deleted(&s->insert->k->k) || !s->insert->k->k.size);

	if (ret == BTREE_INSERT_OK)
		s->committed = next_pos;

	return ret;
}
/*
 * Update iter->pos, marking how much of @insert we've processed, and call hook
 * fn:
 */
static enum btree_insert_ret
extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
{
	struct btree *b = s->insert->iter->l[0].b;
	struct bpos next_pos = bpos_min(s->insert->k->k.p,
					k.k ? k.k->p : b->key.k.p);
	enum btree_insert_ret ret;

	if (race_fault())
		return BTREE_INSERT_NEED_TRAVERSE;

	/* hole? */
	if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
		ret = __extent_insert_advance_pos(s, bkey_start_pos(k.k),
						  bkey_s_c_null);
		if (ret != BTREE_INSERT_OK)
			return ret;
	}

	/* avoid redundant calls to hook fn: */
	if (!bkey_cmp(s->committed, next_pos))
		return BTREE_INSERT_OK;

	return __extent_insert_advance_pos(s, next_pos, k);
}
static enum btree_insert_ret
extent_insert_check_split_compressed(struct extent_insert_state *s,
				     struct bkey_s_c k,
				     enum bch_extent_overlap overlap)
{
	struct bch_fs *c = s->trans->c;
	unsigned sectors;

	if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
	    (sectors = bch2_extent_is_compressed(k))) {
		int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;

		if (s->trans->flags & BTREE_INSERT_NOFAIL)
			flags |= BCH_DISK_RESERVATION_NOFAIL;

		switch (bch2_disk_reservation_add(c,
				s->trans->disk_res,
				sectors * bch2_extent_nr_dirty_ptrs(k),
				flags)) {
		case 0:
			break;
		case -ENOSPC:
			return BTREE_INSERT_ENOSPC;
		case -EINTR:
			return BTREE_INSERT_NEED_GC_LOCK;
		default:
			BUG();
		}
	}

	return BTREE_INSERT_OK;
}
static enum btree_insert_ret
extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
	      struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
	      enum bch_extent_overlap overlap)
{
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	enum btree_insert_ret ret;

	switch (overlap) {
	case BCH_EXTENT_OVERLAP_FRONT:
		/* insert overlaps with start of k: */
		bch2_cut_subtract_front(s, insert->k.p, k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(b, node_iter, _k, k.k);
		break;

	case BCH_EXTENT_OVERLAP_BACK:
		/* insert overlaps with end of k: */
		bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(b, node_iter, _k, k.k);

		/*
		 * As the auxiliary tree is indexed by the end of the
		 * key and we've just changed the end, update the
		 * auxiliary tree.
		 */
		bch2_bset_fix_invalidated_key(b, t, _k);
		bch2_btree_node_iter_fix(iter, b, node_iter, t,
					 _k, _k->u64s, _k->u64s);
		break;

	case BCH_EXTENT_OVERLAP_ALL: {
		struct bpos orig_pos = k.k->p;

		/* The insert key completely covers k, invalidate k */
		if (!bkey_whiteout(k.k))
			btree_keys_account_key_drop(&b->nr,
						t - b->set, _k);

		bch2_drop_subtract(s, k);
		k.k->p = bkey_start_pos(&insert->k);
		if (!__extent_save(b, node_iter, _k, k.k)) {
			/*
			 * Couldn't repack: we aren't necessarily able
			 * to repack if the new key is outside the range
			 * of the old extent, so we have to split
			 * @insert:
			 */
			k.k->p = orig_pos;
			extent_save(b, node_iter, _k, k.k);

			ret = extent_insert_advance_pos(s, k.s_c);
			if (ret != BTREE_INSERT_OK)
				return ret;

			extent_insert_committed(s);
			/*
			 * We split and inserted upto at k.k->p - that
			 * has to coincide with iter->pos, so that we
			 * don't have anything more we have to insert
			 * until we recheck our journal reservation:
			 */
			EBUG_ON(bkey_cmp(s->committed, k.k->p));
		} else {
			bch2_bset_fix_invalidated_key(b, t, _k);
			bch2_btree_node_iter_fix(iter, b, node_iter, t,
						 _k, _k->u64s, _k->u64s);
		}

		break;
	}
	case BCH_EXTENT_OVERLAP_MIDDLE: {
		BKEY_PADDED(k) split;
		/*
		 * The insert key falls 'in the middle' of k
		 * The insert key splits k in 3:
		 * - start only in k, preserve
		 * - middle common section, invalidate in k
		 * - end only in k, preserve
		 *
		 * We update the old key to preserve the start,
		 * insert will be the new common section,
		 * we manually insert the end that we are preserving.
		 *
		 * modify k _before_ doing the insert (which will move
		 * what k points to)
		 */
		bkey_reassemble(&split.k, k.s_c);
		split.k.k.needs_whiteout |= bset_written(b, bset(b, t));

		bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
		BUG_ON(bkey_deleted(&split.k.k));

		bch2_cut_subtract_front(s, insert->k.p, k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(b, node_iter, _k, k.k);

		bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
				 bkey_start_offset(&split.k.k),
				 split.k.k.size);
		extent_bset_insert(c, iter, &split.k);
		break;
	}
	}

	return BTREE_INSERT_OK;
}
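
/*
 * Overlap cases handled by extent_squash(), with the new key drawn above the
 * old key k:
 *
 *   FRONT:   insert: |----|         BACK:   insert:     |----|
 *            k:         |----|              k:      |----|
 *            (k's front is cut)             (k's back is cut)
 *
 *   ALL:     insert: |--------|     MIDDLE: insert:   |--|
 *            k:        |----|               k:      |--------|
 *            (k is dropped)                 (k is split in two)
 */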
static enum btree_insert_ret
__bch2_delete_fixup_extent(struct extent_insert_state *s)
{
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	struct bkey_packed *_k;
	struct bkey unpacked;
	struct bkey_i *insert = s->insert->k;
	enum btree_insert_ret ret = BTREE_INSERT_OK;

	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));

	s->whiteout	= *insert;
	s->do_journal	= false;

	while (bkey_cmp(s->committed, insert->k.p) < 0 &&
	       (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
	       (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
		struct bset_tree *t = bch2_bkey_to_bset(b, _k);
		struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
		enum bch_extent_overlap overlap;

		EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
		EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);

		if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
			break;

		if (bkey_whiteout(k.k)) {
			s->committed = bpos_min(insert->k.p, k.k->p);
			goto next;
		}

		overlap = bch2_extent_overlap(&insert->k, k.k);

		ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
		if (ret)
			break;

		ret = extent_insert_advance_pos(s, k.s_c);
		if (ret)
			break;

		s->do_journal = true;

		if (overlap == BCH_EXTENT_OVERLAP_ALL) {
			btree_keys_account_key_drop(&b->nr,
						t - b->set, _k);
			bch2_subtract_sectors(s, k.s_c,
					      bkey_start_offset(k.k), k.k->size);
			_k->type = KEY_TYPE_DISCARD;
			reserve_whiteout(b, t, _k);
		} else if (k.k->needs_whiteout ||
			   bset_written(b, bset(b, t))) {
			struct bkey_i discard = *insert;

			switch (overlap) {
			case BCH_EXTENT_OVERLAP_FRONT:
				bch2_cut_front(bkey_start_pos(k.k), &discard);
				break;
			case BCH_EXTENT_OVERLAP_BACK:
				bch2_cut_back(k.k->p, &discard.k);
				break;
			default:
				break;
			}

			discard.k.needs_whiteout = true;

			ret = extent_squash(s, insert, t, _k, k, overlap);
			BUG_ON(ret != BTREE_INSERT_OK);

			extent_bset_insert(c, iter, &discard);
		} else {
			ret = extent_squash(s, insert, t, _k, k, overlap);
			BUG_ON(ret != BTREE_INSERT_OK);
		}
next:
		bch2_cut_front(s->committed, insert);
		bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
	}

	return ret;
}
static enum btree_insert_ret
__bch2_insert_fixup_extent(struct extent_insert_state *s)
{
	struct btree_iter *iter = s->insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	struct bkey_packed *_k;
	struct bkey unpacked;
	struct bkey_i *insert = s->insert->k;
	enum btree_insert_ret ret = BTREE_INSERT_OK;

	while (bkey_cmp(s->committed, insert->k.p) < 0 &&
	       (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
	       (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
		struct bset_tree *t = bch2_bkey_to_bset(b, _k);
		struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
		enum bch_extent_overlap overlap;

		EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
		EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);

		if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
			break;

		overlap = bch2_extent_overlap(&insert->k, k.k);

		ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
		if (ret)
			break;

		if (!k.k->size)
			goto squash;

		/*
		 * Only call advance pos & call hook for nonzero size extents:
		 */
		ret = extent_insert_advance_pos(s, k.s_c);
		if (ret)
			break;

		if (k.k->size &&
		    (k.k->needs_whiteout || bset_written(b, bset(b, t))))
			insert->k.needs_whiteout = true;

		if (overlap == BCH_EXTENT_OVERLAP_ALL &&
		    bkey_whiteout(k.k) &&
		    k.k->needs_whiteout) {
			unreserve_whiteout(b, t, _k);
			_k->needs_whiteout = false;
		}
squash:
		ret = extent_squash(s, insert, t, _k, k, overlap);
		if (ret != BTREE_INSERT_OK)
			break;
	}

	return ret;
}
/**
 * bch2_insert_fixup_extent - insert a new extent and deal with overlaps
 *
 * this may result in not actually doing the insert, or inserting some subset
 * of the insert key. For cmpxchg operations this is where that logic lives.
 *
 * All subsets of @insert that need to be inserted are inserted using
 * bch2_btree_insert_and_journal(). If @b or @res fills up, this function
 * returns false, setting @iter->pos for the prefix of @insert that actually got
 * inserted.
 *
 * BSET INVARIANTS: this function is responsible for maintaining all the
 * invariants for bsets of extents in memory. things get really hairy with 0
 * size extents
 *
 * within one bset:
 *
 * bkey_start_pos(bkey_next(k)) >= k
 * or bkey_start_offset(bkey_next(k)) >= k->offset
 *
 * i.e. strict ordering, no overlapping extents.
 *
 * multiple bsets (i.e. full btree node):
 *
 * ∀ k, j
 *   k.size != 0 ∧ j.size != 0 →
 *     ¬ (k > bkey_start_pos(j) ∧ k < j)
 *
 * i.e. no two overlapping keys _of nonzero size_
 *
 * We can't realistically maintain this invariant for zero size keys because of
 * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
 * there may be another 0 size key between them in another bset, and it will
 * thus overlap with the merged key.
 *
 * In addition, the end of iter->pos indicates how much has been processed.
 * If the end of iter->pos is not the same as the end of insert, then
 * key insertion needs to continue/be retried.
 */
enum btree_insert_ret
bch2_insert_fixup_extent(struct btree_insert *trans,
			 struct btree_insert_entry *insert)
{
	struct bch_fs *c	= trans->c;
	struct btree_iter *iter	= insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b		= l->b;
	enum btree_insert_ret ret = BTREE_INSERT_OK;

	struct extent_insert_state s = {
		.trans		= trans,
		.insert		= insert,
		.committed	= insert->iter->pos,
		.deleting	= bkey_whiteout(&insert->k->k),
	};

	EBUG_ON(iter->level);
	EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);

	/*
	 * As we process overlapping extents, we advance @iter->pos both to
	 * signal to our caller (btree_insert_key()) how much of @insert->k has
	 * been inserted, and also to keep @iter->pos consistent with
	 * @insert->k and the node iterator that we're advancing:
	 */
	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));

	if (!s.deleting &&
	    !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
		bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
				 bkey_start_offset(&insert->k->k),
				 insert->k->k.size);

	ret = !s.deleting
		? __bch2_insert_fixup_extent(&s)
		: __bch2_delete_fixup_extent(&s);

	if (ret == BTREE_INSERT_OK &&
	    bkey_cmp(s.committed, insert->k->k.p) < 0)
		ret = extent_insert_advance_pos(&s, bkey_s_c_null);

	extent_insert_committed(&s);

	if (s.deleting)
		bch2_cut_front(iter->pos, insert->k);

	/*
	 * Subtract any remaining sectors from @insert, if we bailed out early
	 * and didn't fully insert @insert:
	 */
	if (!s.deleting &&
	    !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY) &&
	    insert->k->k.size)
		bch2_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
				      bkey_start_offset(&insert->k->k),
				      insert->k->k.size);

	bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
			    gc_pos_btree_node(b));

	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
	EBUG_ON(bkey_cmp(iter->pos, s.committed));
	EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
		!!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));

	if (insert->k->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
		ret = BTREE_INSERT_NEED_TRAVERSE;

	WARN_ONCE((ret == BTREE_INSERT_OK) != (insert->k->k.size == 0),
		  "ret %u insert->k.size %u", ret, insert->k->k.size);

	return ret;
}
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
		return "value too big";

	if (!k.k->size)
		return "zero key size";

	switch (k.k->type) {
	case BCH_EXTENT:
	case BCH_EXTENT_CACHED: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;
		struct bch_extent_crc_unpacked crc;
		const struct bch_extent_ptr *ptr;
		unsigned size_ondisk = e.k->size;
		const char *reason;
		unsigned nonce = UINT_MAX;

		extent_for_each_entry(e, entry) {
			if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
				return "invalid extent entry type";

			if (extent_entry_is_crc(entry)) {
				crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));

				if (crc.offset + e.k->size >
				    crc.uncompressed_size)
					return "checksum offset + key size > uncompressed size";

				size_ondisk = crc.compressed_size;

				if (!bch2_checksum_type_valid(c, crc.csum_type))
					return "invalid checksum type";

				if (crc.compression_type >= BCH_COMPRESSION_NR)
					return "invalid compression type";

				if (bch2_csum_type_is_encryption(crc.csum_type)) {
					if (nonce == UINT_MAX)
						nonce = crc.offset + crc.nonce;
					else if (nonce != crc.offset + crc.nonce)
						return "incorrect nonce";
				}
			} else {
				ptr = entry_to_ptr(entry);

				reason = extent_ptr_invalid(c, e, &entry->ptr,
							    size_ondisk, false);
				if (reason)
					return reason;
			}
		}

		return NULL;
	}

	case BCH_RESERVATION: {
		struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

		if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
			return "incorrect value size";

		if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
			return "invalid nr_replicas";

		return NULL;
	}

	default:
		return "invalid value type";
	}
}
static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
					  struct bkey_s_c_extent e)
{
	const struct bch_extent_ptr *ptr;
	struct bch_dev *ca;
	struct bucket_mark mark;
	unsigned seq, stale;
	char buf[160];
	bool bad;
	unsigned replicas = 0;

	/*
	 * XXX: we should be doing most/all of these checks at startup time,
	 * where we check bch2_bkey_invalid() in btree_node_read_done()
	 *
	 * But note that we can't check for stale pointers or incorrect gc marks
	 * until after journal replay is done (it might be an extent that's
	 * going to get overwritten during replay)
	 */

	extent_for_each_ptr(e, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		replicas++;

		/*
		 * If journal replay hasn't finished, we might be seeing keys
		 * that will be overwritten by the time journal replay is done:
		 */
		if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
			continue;

		stale = 0;

		do {
			seq = read_seqcount_begin(&c->gc_pos_lock);
			mark = ptr_bucket_mark(ca, ptr);

			/* between mark and bucket gen */
			smp_rmb();

			stale = ptr_stale(ca, ptr);

			bch2_fs_bug_on(stale && !ptr->cached, c,
				       "stale dirty pointer");

			bch2_fs_bug_on(stale > 96, c,
				       "key too stale: %i",
				       stale);

			if (stale)
				break;

			bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
				(mark.data_type != BCH_DATA_USER ||
				 !(ptr->cached
				   ? mark.cached_sectors
				   : mark.dirty_sectors));
		} while (read_seqcount_retry(&c->gc_pos_lock, seq));

		if (bad)
			goto bad_ptr;
	}

	if (replicas > BCH_REPLICAS_MAX) {
		bch2_bkey_val_to_text(c, btree_node_type(b), buf,
				      sizeof(buf), e.s_c);
		bch2_fs_bug(c,
			"extent key bad (too many replicas: %u): %s",
			replicas, buf);
		return;
	}

	if (!bkey_extent_is_cached(e.k) &&
	    !bch2_bkey_replicas_marked(c, BCH_DATA_USER, e.s_c)) {
		bch2_bkey_val_to_text(c, btree_node_type(b),
				      buf, sizeof(buf), e.s_c);
		bch2_fs_bug(c,
			"extent key bad (replicas not marked in superblock):\n%s",
			buf);
		return;
	}

	return;

bad_ptr:
	bch2_bkey_val_to_text(c, btree_node_type(b), buf,
			      sizeof(buf), e.s_c);
	bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
		    "gen %i type %u", buf,
		    PTR_BUCKET_NR(ca, ptr), mark.gen, mark.data_type);
}
void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
{
	switch (k.k->type) {
	case BCH_EXTENT:
	case BCH_EXTENT_CACHED:
		bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
		break;
	case BCH_RESERVATION:
		break;
	default:
		BUG();
	}
}
void bch2_extent_to_text(struct bch_fs *c, char *buf,
			 size_t size, struct bkey_s_c k)
{
	char *out = buf, *end = buf + size;
	const char *invalid;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	if (bkey_extent_is_data(k.k))
		out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));

	invalid = bch2_extent_invalid(c, k);
	if (invalid)
		p(" invalid: %s", invalid);
#undef p
}
static void bch2_extent_crc_init(union bch_extent_crc *crc,
				 struct bch_extent_crc_unpacked new)
{
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		._compressed_size	= _crc.compressed_size - 1,	\
		._uncompressed_size	= _crc.uncompressed_size - 1,	\
		.offset			= _crc.offset

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX) {
		crc->crc32 = (struct bch_extent_crc32) {
			.type = 1 << BCH_EXTENT_ENTRY_crc32,
			common_fields(new),
			.csum			= *((__le32 *) &new.csum.lo),
		};
		return;
	}

	if (bch_crc_bytes[new.csum_type]	<= 10 &&
	    new.uncompressed_size		<= CRC64_SIZE_MAX &&
	    new.nonce				<= CRC64_NONCE_MAX) {
		crc->crc64 = (struct bch_extent_crc64) {
			.type = 1 << BCH_EXTENT_ENTRY_crc64,
			common_fields(new),
			.nonce			= new.nonce,
			.csum_lo		= new.csum.lo,
			.csum_hi		= *((__le16 *) &new.csum.hi),
		};
		return;
	}

	if (bch_crc_bytes[new.csum_type]	<= 16 &&
	    new.uncompressed_size		<= CRC128_SIZE_MAX &&
	    new.nonce				<= CRC128_NONCE_MAX) {
		crc->crc128 = (struct bch_extent_crc128) {
			.type = 1 << BCH_EXTENT_ENTRY_crc128,
			common_fields(new),
			.nonce			= new.nonce,
			.csum			= new.csum,
		};
		return;
	}
#undef common_fields
	BUG();
}
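
/*
 * Note the - 1 in common_fields(): sizes are stored biased by one, so a size
 * equal to the CRC32_SIZE_MAX/CRC64_SIZE_MAX/CRC128_SIZE_MAX limit checked
 * above still fits in the packed field. The three branches pick the smallest
 * entry format that can represent the checksum, sizes and nonce.
 */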
void bch2_extent_crc_append(struct bkey_i_extent *e,
			    struct bch_extent_crc_unpacked new)
{
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	BUG_ON(new.compressed_size > new.uncompressed_size);
	BUG_ON(new.live_size != e->k.size);
	BUG_ON(!new.compressed_size || !new.uncompressed_size);

	/*
	 * Look up the last crc entry, so we can check if we need to add
	 * another:
	 */
	extent_for_each_crc(extent_i_to_s(e), crc, i)
		;

	if (!bch2_crc_unpacked_cmp(crc, new))
		return;

	bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
	__extent_entry_push(e);
}
/*
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bkey_s_extent e;

	switch (k.k->type) {
	case KEY_TYPE_ERROR:
		return false;

	case KEY_TYPE_DELETED:
	case KEY_TYPE_COOKIE:
		return true;

	case KEY_TYPE_DISCARD:
		return bversion_zero(k.k->version);

	case BCH_EXTENT:
	case BCH_EXTENT_CACHED:
		e = bkey_s_to_extent(k);

		bch2_extent_drop_stale(c, e);

		if (!bkey_val_u64s(e.k)) {
			if (bkey_extent_is_cached(e.k)) {
				k.k->type = KEY_TYPE_DISCARD;
				if (bversion_zero(k.k->version))
					return true;
			} else
				k.k->type = KEY_TYPE_ERROR;
		}

		return false;
	case BCH_RESERVATION:
		return false;
	default:
		BUG();
	}
}
void bch2_extent_mark_replicas_cached(struct bch_fs *c,
				      struct bkey_s_extent e,
				      unsigned target,
				      unsigned nr_desired_replicas)
{
	struct bch_extent_ptr *ptr;
	int extra = bch2_extent_durability(c, e.c) - nr_desired_replicas;

	if (target && extra > 0)
		extent_for_each_ptr(e, ptr) {
			int n = bch2_extent_ptr_durability(c, ptr);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, ptr->dev, target)) {
				ptr->cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		extent_for_each_ptr(e, ptr) {
			int n = bch2_extent_ptr_durability(c, ptr);

			if (n && n <= extra) {
				ptr->cached = true;
				extra -= n;
			}
		}
}
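
/*
 * Example: an extent with three dirty pointers, each with durability 1, and
 * nr_desired_replicas 2 gives extra = 1: exactly one pointer is flipped to
 * cached, and the first loop above prefers to demote a pointer outside
 * @target so the replica on the desired target is kept dirty.
 */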
/*
 * This picks a non-stale pointer, preferably from a device other than @avoid.
 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
 * other devices, it will still pick a pointer from avoid.
 */
int bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
			 struct bch_devs_mask *avoid,
			 struct extent_pick_ptr *pick)
{
	int ret;

	switch (k.k->type) {
	case KEY_TYPE_DELETED:
	case KEY_TYPE_DISCARD:
	case KEY_TYPE_COOKIE:
		return 0;

	case KEY_TYPE_ERROR:
		return -EIO;

	case BCH_EXTENT:
	case BCH_EXTENT_CACHED:
		ret = extent_pick_read_device(c, bkey_s_c_to_extent(k),
					      avoid, pick);

		if (!ret && !bkey_extent_is_cached(k.k))
			ret = -EIO;

		return ret;

	case BCH_RESERVATION:
		return 0;

	default:
		BUG();
	}
}
enum merge_result bch2_extent_merge(struct bch_fs *c, struct btree *b,
				    struct bkey_i *l, struct bkey_i *r)
{
	struct bkey_s_extent el, er;
	union bch_extent_entry *en_l, *en_r;

	if (key_merging_disabled(c))
		return BCH_MERGE_NOMERGE;

	/*
	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	 */

	if (l->k.u64s != r->k.u64s ||
	    l->k.type != r->k.type ||
	    bversion_cmp(l->k.version, r->k.version) ||
	    bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
		return BCH_MERGE_NOMERGE;

	switch (l->k.type) {
	case KEY_TYPE_DELETED:
	case KEY_TYPE_DISCARD:
	case KEY_TYPE_ERROR:
		/* These types are mergeable, and no val to check */
		break;

	case BCH_EXTENT:
	case BCH_EXTENT_CACHED:
		el = bkey_i_to_s_extent(l);
		er = bkey_i_to_s_extent(r);

		extent_for_each_entry(el, en_l) {
			struct bch_extent_ptr *lp, *rp;
			struct bch_dev *ca;

			en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);

			if ((extent_entry_type(en_l) !=
			     extent_entry_type(en_r)) ||
			    extent_entry_is_crc(en_l))
				return BCH_MERGE_NOMERGE;

			lp = &en_l->ptr;
			rp = &en_r->ptr;

			if (lp->offset + el.k->size != rp->offset ||
			    lp->dev			!= rp->dev ||
			    lp->gen			!= rp->gen)
				return BCH_MERGE_NOMERGE;

			/* We don't allow extents to straddle buckets: */
			ca = bch_dev_bkey_exists(c, lp->dev);

			if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
				return BCH_MERGE_NOMERGE;
		}

		break;
	case BCH_RESERVATION: {
		struct bkey_i_reservation *li = bkey_i_to_reservation(l);
		struct bkey_i_reservation *ri = bkey_i_to_reservation(r);

		if (li->v.generation != ri->v.generation ||
		    li->v.nr_replicas != ri->v.nr_replicas)
			return BCH_MERGE_NOMERGE;
		break;
	}
	default:
		return BCH_MERGE_NOMERGE;
	}

	l->k.needs_whiteout |= r->k.needs_whiteout;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
		bch2_key_resize(&l->k, KEY_SIZE_MAX);
		bch2_cut_front(l->k.p, r);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(&l->k, l->k.size + r->k.size);

	return BCH_MERGE_MERGE;
}
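
/*
 * Example of a mergeable pair: l covers [0, 8) with a pointer to dev 0 at
 * offset 100; r covers [8, 16) with a pointer to dev 0 at offset 108 and the
 * same gen. The values line up end to start within one bucket, so the result
 * is a single extent covering [0, 16). If the combined size would exceed
 * KEY_SIZE_MAX, only a partial merge is done (BCH_MERGE_PARTIAL) and r keeps
 * the remainder.
 */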
static void extent_i_save(struct btree *b, struct bkey_packed *dst,
			  struct bkey_i *src)
{
	struct bkey_format *f = &b->format;
	struct bkey_i *dst_unpacked;

	BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k));

	/*
	 * We don't want the bch2_verify_key_order() call in extent_save(),
	 * because we may be out of order with deleted keys that are about to be
	 * removed by extent_bset_insert()
	 */

	if ((dst_unpacked = packed_to_bkey(dst)))
		bkey_copy(dst_unpacked, src);
	else
		BUG_ON(!bch2_bkey_pack(dst, src, f));
}
static bool extent_merge_one_overlapping(struct btree_iter *iter,
					 struct bpos new_pos,
					 struct bset_tree *t,
					 struct bkey_packed *k, struct bkey uk,
					 bool check, bool could_pack)
{
	struct btree_iter_level *l = &iter->l[0];

	BUG_ON(!bkey_deleted(k));

	if (check) {
		return !bkey_packed(k) || could_pack;
	} else {
		uk.p = new_pos;
		extent_save(l->b, &l->iter, k, &uk);
		bch2_bset_fix_invalidated_key(l->b, t, k);
		bch2_btree_node_iter_fix(iter, l->b, &l->iter, t,
					 k, k->u64s, k->u64s);
		return true;
	}
}
static bool extent_merge_do_overlapping(struct btree_iter *iter,
					struct bkey *m, bool back_merge)
{
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	struct bset_tree *t;
	struct bkey_packed *k;
	struct bkey uk;
	struct bpos new_pos = back_merge ? m->p : bkey_start_pos(m);
	bool could_pack = bkey_pack_pos((void *) &uk, new_pos, b);
	bool check = true;

	/*
	 * @m is the new merged extent:
	 *
	 * The merge took place in the last bset; we know there can't be any 0
	 * size extents overlapping with m there because if so they would have
	 * been between the two extents we merged.
	 *
	 * But in the other bsets, we have to check for and fix such extents:
	 */
do_fixup:
	for_each_bset(b, t) {
		if (t == bset_tree_last(b))
			break;

		/*
		 * if we don't find this bset in the iterator we already got to
		 * the end of that bset, so start searching from the end.
		 */
		k = bch2_btree_node_iter_bset_pos(node_iter, b, t);

		if (k == btree_bkey_last(b, t))
			k = bch2_bkey_prev_all(b, t, k);
		if (!k)
			continue;

		if (back_merge) {
			/*
			 * Back merge: 0 size extents will be before the key
			 * that was just inserted (and thus the iterator
			 * position) - walk backwards to find them
			 */
			for (;
			     k &&
			     (uk = bkey_unpack_key(b, k),
			      bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
			     k = bch2_bkey_prev_all(b, t, k)) {
				if (bkey_cmp(uk.p, m->p) >= 0)
					continue;

				if (!extent_merge_one_overlapping(iter, new_pos,
						t, k, uk, check, could_pack))
					return false;
			}
		} else {
			/* Front merge - walk forwards */
			for (;
			     k != btree_bkey_last(b, t) &&
			     (uk = bkey_unpack_key(b, k),
			      bkey_cmp(uk.p, m->p) < 0);
			     k = bkey_next(k)) {
				if (bkey_cmp(uk.p,
					     bkey_start_pos(m)) <= 0)
					continue;

				if (!extent_merge_one_overlapping(iter, new_pos,
						t, k, uk, check, could_pack))
					return false;
			}
		}
	}

	if (check) {
		check = false;
		goto do_fixup;
	}

	return true;
}
/*
 * When merging an extent that we're inserting into a btree node, the new merged
 * extent could overlap with an existing 0 size extent - if we don't fix that,
 * it'll break the btree node iterator so this code finds those 0 size extents
 * and shifts them out of the way.
 *
 * Also unpacks and repacks.
 */
static bool bch2_extent_merge_inline(struct bch_fs *c,
				     struct btree_iter *iter,
				     struct bkey_packed *l,
				     struct bkey_packed *r,
				     bool back_merge)
{
	struct btree *b = iter->l[0].b;
	struct btree_node_iter *node_iter = &iter->l[0].iter;
	const struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed *m;
	BKEY_PADDED(k) li;
	BKEY_PADDED(k) ri;
	struct bkey_i *mi;
	struct bkey tmp;

	/*
	 * We need to save copies of both l and r, because we might get a
	 * partial merge (which modifies both) and then fails to repack
	 */
	bch2_bkey_unpack(b, &li.k, l);
	bch2_bkey_unpack(b, &ri.k, r);

	m = back_merge ? l : r;
	mi = back_merge ? &li.k : &ri.k;

	/* l & r should be in last bset: */
	EBUG_ON(bch2_bkey_to_bset(b, m) != t);

	switch (bch2_extent_merge(c, b, &li.k, &ri.k)) {
	case BCH_MERGE_NOMERGE:
		return false;
	case BCH_MERGE_PARTIAL:
		if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &mi->k, f))
			return false;

		if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
			return false;

		extent_i_save(b, m, mi);
		bch2_bset_fix_invalidated_key(b, t, m);

		/*
		 * Update iterator to reflect what we just inserted - otherwise,
		 * the iter_fix() call is going to put us _before_ the key we
		 * just partially merged with:
		 */
		if (back_merge)
			bch2_btree_iter_set_pos_same_leaf(iter, li.k.k.p);

		bch2_btree_node_iter_fix(iter, b, node_iter,
					 t, m, m->u64s, m->u64s);

		if (!back_merge)
			bkey_copy(packed_to_bkey(l), &li.k);
		else
			bkey_copy(packed_to_bkey(r), &ri.k);
		return false;
	case BCH_MERGE_MERGE:
		if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &li.k.k, f))
			return false;

		if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
			return false;

		extent_i_save(b, m, &li.k);
		bch2_bset_fix_invalidated_key(b, t, m);

		bch2_btree_node_iter_fix(iter, b, node_iter,
					 t, m, m->u64s, m->u64s);
		return true;
	default:
		BUG();
	}
}
int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size)
{
	struct btree_iter iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	int ret = 0;

	end.offset += size;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
			   BTREE_ITER_SLOTS, k) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (!bch2_extent_is_fully_allocated(k)) {
			ret = -ENOSPC;
			break;
		}
	}
	bch2_btree_iter_unlock(&iter);

	return ret;
}