 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Code for managing the extent btree and dynamically updating the writeback
#include "bkey_methods.h"
#include "btree_update.h"
#include "btree_update_interior.h"

#include <trace/events/bcachefs.h>
static enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *,
					   struct bkey_i *, struct bkey_i *);
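/*
 * Advance @i to its next key in its bset; a set that runs out of keys is
 * dropped from the iterator by overwriting it with the last set and shrinking
 * iter->used.
 */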
static void sort_key_next(struct btree_node_iter *iter,
			  struct btree *b,
			  struct btree_node_iter_set *i)
	i->k += __btree_node_offset_to_key(b, i->k)->u64s;
		*i = iter->data[--iter->used];
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
#define key_sort_cmp(h, l, r)						\
		__btree_node_offset_to_key(b, (l).k),			\
		__btree_node_offset_to_key(b, (r).k))			\
static inline bool should_drop_next_key(struct btree_node_iter *iter,
	struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
	struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);

	    key_sort_cmp(iter, r[0], r[1]) >= 0)

	 * key_sort_cmp() ensures that when keys compare equal the older key
	 * comes first; so if l->k compares equal to r->k then l->k is older and
	return !bkey_cmp_packed(b,
				__btree_node_offset_to_key(b, l->k),
				__btree_node_offset_to_key(b, r->k));
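/*
 * Merge the sorted bsets tracked by @iter into @dst, dropping the keys that
 * should_drop_next_key() flags as duplicates so that only one version of each
 * key (the newest) survives.
 */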
struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
						   struct btree_node_iter *iter)
	struct bkey_packed *out = dst->start;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, key_sort_cmp);

	while (!bch2_btree_node_iter_end(iter)) {
		if (!should_drop_next_key(iter, b)) {
			struct bkey_packed *k =
				__btree_node_offset_to_key(b, iter->data->k);

			btree_keys_account_key_add(&nr, 0, out);
			out = bkey_next(out);

		sort_key_next(iter, b, iter->data);
		heap_sift_down(iter, 0, key_sort_cmp);

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
/* Common among btree and extent ptrs */

const struct bch_extent_ptr *
bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
bool bch2_extent_drop_device(struct bkey_s_extent e, unsigned dev)
	struct bch_extent_ptr *ptr;
	bool dropped = false;

	extent_for_each_ptr_backwards(e, ptr)
		if (ptr->dev == dev) {
			__bch2_extent_drop_ptr(e, ptr);

		bch2_extent_drop_redundant_crcs(e);
const struct bch_extent_ptr *
bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr) {
		struct bch_dev *ca = c->devs[ptr->dev];

		    ca->mi.group == group)

const struct bch_extent_ptr *
bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
		if (dev_in_target(c->devs[ptr->dev], target))
unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
	const struct bch_extent_ptr *ptr;
	unsigned nr_ptrs = 0;

	extent_for_each_ptr(e, ptr)

unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
	struct bkey_s_c_extent e;
	const struct bch_extent_ptr *ptr;
	unsigned nr_ptrs = 0;

	case BCH_EXTENT_CACHED:
		e = bkey_s_c_to_extent(k);

		extent_for_each_ptr(e, ptr)
			nr_ptrs += !ptr->cached;
	case BCH_RESERVATION:
		nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;

unsigned bch2_extent_nr_good_ptrs(struct bch_fs *c, struct bkey_s_c_extent e)
	const struct bch_extent_ptr *ptr;
	unsigned nr_ptrs = 0;

	extent_for_each_ptr(e, ptr)
		nr_ptrs += (!ptr->cached &&
			    bch_dev_bkey_exists(c, ptr->dev)->mi.state !=
			    BCH_MEMBER_STATE_FAILED);
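/*
 * Returns the largest compressed_size, in sectors, among this extent's crc
 * entries that describe compressed data (0 if nothing is compressed); used
 * below to size the extra disk reservation needed when a compressed extent is
 * split.
 */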
unsigned bch2_extent_is_compressed(struct bkey_s_c k)
	struct bkey_s_c_extent e;
	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;

	case BCH_EXTENT_CACHED:
		e = bkey_s_c_to_extent(k);

		extent_for_each_ptr_crc(e, ptr, crc)
			    crc.compression_type != BCH_COMPRESSION_NONE &&
			    crc.compressed_size < crc.live_size)
				ret = max_t(unsigned, ret, crc.compressed_size);
bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
			     struct bch_extent_ptr m, u64 offset)
	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;

	extent_for_each_ptr_crc(e, ptr, crc)
		if (ptr->dev == m.dev &&
		    (s64) ptr->offset + crc.offset - bkey_start_offset(e.k) ==
		    (s64) m.offset - offset)
/* Doesn't clean up redundant crcs */
void __bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr)
	EBUG_ON(ptr < &e.v->start->ptr ||
		ptr >= &extent_entry_last(e)->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	memmove_u64s_down(ptr, ptr + 1,
			  (u64 *) extent_entry_last(e) - (u64 *) (ptr + 1));
	e.k->u64s -= sizeof(*ptr) / sizeof(u64);

void bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr)
	__bch2_extent_drop_ptr(e, ptr);
	bch2_extent_drop_redundant_crcs(e);
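/*
 * A rough illustration of crc narrowing: suppose a checksummed, uncompressed
 * extent originally covered 128 sectors but has been trimmed so that only 8
 * sectors at crc.offset are still live.  Readers currently have to read and
 * checksum all 128 sectors and then bounce; after narrowing, the pointer is
 * advanced by the old crc offset and a new crc covering just the 8 live
 * sectors is used instead.
 */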
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
	return !u.compression_type &&
	       u.uncompressed_size > u.live_size &&
	       bch2_csum_type_is_encryption(u.csum_type) ==
	       bch2_csum_type_is_encryption(n.csum_type);

bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
				 struct bch_extent_crc_unpacked n)
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	extent_for_each_crc(e, crc, i)
		if (can_narrow_crc(crc, n))
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
			     struct bch_extent_crc_unpacked n)
	struct bch_extent_crc_unpacked u;
	struct bch_extent_ptr *ptr;
	union bch_extent_entry *i;

	/* Find a checksum entry that covers only live data: */
	extent_for_each_crc(extent_i_to_s(e), u, i)
		if (!u.compression_type &&
		    u.live_size == u.uncompressed_size) {

	if (!bch2_can_narrow_extent_crcs(extent_i_to_s_c(e), n))

	BUG_ON(n.compression_type);
	BUG_ON(n.live_size != e->k.size);

	bch2_extent_crc_append(e, n);
restart_narrow_pointers:
	extent_for_each_ptr_crc(extent_i_to_s(e), ptr, u)
		if (can_narrow_crc(u, n)) {
			ptr->offset += u.offset;
			extent_ptr_append(e, *ptr);
			__bch2_extent_drop_ptr(extent_i_to_s(e), ptr);
			goto restart_narrow_pointers;

	bch2_extent_drop_redundant_crcs(extent_i_to_s(e));
void bch2_extent_drop_redundant_crcs(struct bkey_s_extent e)
	union bch_extent_entry *entry = e.v->start;
	union bch_extent_crc *crc, *prev = NULL;
	struct bch_extent_crc_unpacked u, prev_u;

	while (entry != extent_entry_last(e)) {
		union bch_extent_entry *next = extent_entry_next(entry);
		size_t crc_u64s = extent_entry_u64s(entry);

		if (!extent_entry_is_crc(entry))

		crc = entry_to_crc(entry);
		u = bch2_extent_crc_unpack(e.k, crc);

		if (next == extent_entry_last(e)) {
			/* crc entry with no pointers after it: */

		if (extent_entry_is_crc(next)) {
			/* no pointers before next crc entry: */

		if (prev && !memcmp(&u, &prev_u, sizeof(u))) {
			/* identical to previous crc entry: */

		    !u.compression_type) {
			/* null crc entry: */
			union bch_extent_entry *e2;

			extent_for_each_entry_from(e, e2, extent_entry_next(entry)) {
				if (!extent_entry_is_ptr(e2))

				e2->ptr.offset += u.offset;

		memmove_u64s_down(crc, next,
				  (u64 *) extent_entry_last(e) - (u64 *) next);
		e.k->u64s -= crc_u64s;

	EBUG_ON(bkey_val_u64s(e.k) && !bch2_extent_nr_ptrs(e.c));
static bool should_drop_ptr(const struct bch_fs *c,
			    struct bkey_s_c_extent e,
			    const struct bch_extent_ptr *ptr)
	return ptr->cached && ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr);

static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
	struct bch_extent_ptr *ptr = &e.v->start->ptr;
	bool dropped = false;

	while ((ptr = extent_ptr_next(e, ptr)))
		if (should_drop_ptr(c, e.c, ptr)) {
			__bch2_extent_drop_ptr(e, ptr);

		bch2_extent_drop_redundant_crcs(e);

static bool bch2_ptr_normalize(struct bch_fs *c, struct btree *bk,
	return bch2_extent_normalize(c, k);
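/*
 * Byte-swap an extent value read with the opposite endianness: the value is
 * first swabbed as an array of u64s, then the checksum field of each crc
 * entry is re-swabbed at its actual width.
 */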
static void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
	case BCH_EXTENT_CACHED: {
		union bch_extent_entry *entry;
		u64 *d = (u64 *) bkeyp_val(f, k);

		for (i = 0; i < bkeyp_val_u64s(f, k); i++)

		for (entry = (union bch_extent_entry *) d;
		     entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
		     entry = extent_entry_next(entry)) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.csum = swab32(entry->crc32.csum);
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
				entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.csum.hi = (__force __le64)
					swab64((__force u64) entry->crc128.csum.hi);
				entry->crc128.csum.lo = (__force __le64)
					swab64((__force u64) entry->crc128.csum.lo);
			case BCH_EXTENT_ENTRY_ptr:
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c_extent e,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
	const struct bch_extent_ptr *ptr2;

	if (ptr->dev >= c->sb.nr_devices ||
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
		return "pointer to invalid device";

	extent_for_each_ptr(e, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";
static size_t extent_print_ptrs(struct bch_fs *c, char *buf,
				size_t size, struct bkey_s_c_extent e)
	char *out = buf, *end = buf + size;
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;

#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))

	extent_for_each_entry(e, entry) {
		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));

			p("crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			  crc.uncompressed_size,
			  crc.offset, crc.nonce,
			  crc.compression_type);
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)

			p("ptr: %u:%llu gen %u%s", ptr->dev,
			  (u64) ptr->offset, ptr->gen,
			  ca && ptr_stale(ca, ptr)
			p("(invalid extent entry %.16llx)", *((u64 *) entry));

	if (bkey_extent_is_cached(e.k))
static inline bool dev_latency_better(struct bch_dev *dev1,
				      struct bch_dev *dev2)
	unsigned l1 = atomic_read(&dev1->latency[READ]);
	unsigned l2 = atomic_read(&dev2->latency[READ]);

	/* Pick at random, biased in favor of the faster device: */
	return bch2_rand_range(l1 + l2) > l1;
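/*
 * e.g. with l1 == 10 and l2 == 30, dev1 is preferred roughly 3/4 of the time:
 * assuming bch2_rand_range(n) is uniform over [0, n), the comparison above
 * succeeds with probability ~30/40.
 */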
static void extent_pick_read_device(struct bch_fs *c,
				    struct bkey_s_c_extent e,
				    struct bch_devs_mask *avoid,
				    struct extent_pick_ptr *pick)
	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;

	extent_for_each_ptr_crc(e, ptr, crc) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ptr->cached && ptr_stale(ca, ptr))

		if (ca->mi.state == BCH_MEMBER_STATE_FAILED)

		if (test_bit(ca->dev_idx, avoid->d))
		    test_bit(pick->ca->dev_idx, avoid->d))

		if (pick->ca && !dev_latency_better(ca, pick->ca))

		if (!percpu_ref_tryget(&ca->io_ref))

			percpu_ref_put(&pick->ca->io_ref);

		*pick = (struct extent_pick_ptr) {
static const char *bch2_btree_ptr_invalid(const struct bch_fs *c,
	if (bkey_extent_is_cached(k.k))

		return "nonzero key size";

	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;
		const struct bch_extent_ptr *ptr;

		extent_for_each_entry(e, entry) {
			if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
				return "invalid extent entry type";

			if (extent_entry_is_crc(entry))
				return "has crc field";

		extent_for_each_ptr(e, ptr) {
			reason = extent_ptr_invalid(c, e, ptr,
						    c->opts.btree_node_size,

		return "invalid value type";
static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const struct bch_extent_ptr *ptr;
	struct bucket_mark mark;
	unsigned replicas = 0;

	extent_for_each_ptr(e, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags))

		if (ptr_stale(ca, ptr))

			seq = read_seqcount_begin(&c->gc_pos_lock);
			mark = ptr_bucket_mark(ca, ptr);

			bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
				(mark.data_type != BCH_DATA_BTREE ||
				 mark.dirty_sectors < c->opts.btree_node_size);
		} while (read_seqcount_retry(&c->gc_pos_lock, seq));

			err = "inconsistent";

	if (!bch2_bkey_replicas_marked(c, BCH_DATA_BTREE, e.s_c)) {
		bch2_bkey_val_to_text(c, btree_node_type(b),
				      buf, sizeof(buf), k);
			"btree key bad (replicas not marked in superblock):\n%s",

	bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
	bch2_fs_bug(c, "%s btree pointer %s: bucket %zi "
		    err, buf, PTR_BUCKET_NR(ca, ptr),
		    mark.gen, (unsigned) mark.counter);
static void bch2_btree_ptr_to_text(struct bch_fs *c, char *buf,
				   size_t size, struct bkey_s_c k)
	char *out = buf, *end = buf + size;

#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))

	if (bkey_extent_is_data(k.k))
		out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));

	invalid = bch2_btree_ptr_invalid(c, k);
		p(" invalid: %s", invalid);

struct extent_pick_ptr
bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b,
		    struct bch_devs_mask *avoid)
	struct extent_pick_ptr pick = { .ca = NULL };

	extent_pick_read_device(c, bkey_i_to_s_c_extent(&b->key),

const struct bkey_ops bch2_bkey_btree_ops = {
	.key_invalid	= bch2_btree_ptr_invalid,
	.key_debugcheck	= btree_ptr_debugcheck,
	.val_to_text	= bch2_btree_ptr_to_text,
	.swab		= bch2_ptr_swab,
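/*
 * Worked example for __bch2_cut_front(): a 128 sector extent ending at offset
 * 256, cut at offset 192, shrinks to 64 sectors.  The pointer/crc offsets
 * below are advanced by the 64 dropped sectors (e.k->size - len) so the
 * remaining key still references the right data.
 */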
static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	len = k.k->p.offset - where.offset;

	BUG_ON(len > k.k->size);

	 * Don't readjust offset if the key size is now 0, because that could
	 * cause offset to point to the next bucket:
		__set_bkey_deleted(k.k);
	else if (bkey_extent_is_data(k.k)) {
		struct bkey_s_extent e = bkey_s_to_extent(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		extent_for_each_entry(e, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
					entry->ptr.offset += e.k->size - len;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += e.k->size - len;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += e.k->size - len;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += e.k->size - len;

			if (extent_entry_is_crc(entry))

bool bch2_cut_front(struct bpos where, struct bkey_i *k)
	return __bch2_cut_front(where, bkey_i_to_s(k));
bool bch2_cut_back(struct bpos where, struct bkey *k)
	if (bkey_cmp(where, k->p) >= 0)

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);

	len = where.offset - bkey_start_offset(k);

	BUG_ON(len > k->size);

		__set_bkey_deleted(k);
 * bch2_key_resize - adjust size of @k
 * bkey_start_offset(k) will be preserved, modifies where the extent ends
void bch2_key_resize(struct bkey *k,
	k->p.offset -= k->size;
	k->p.offset += new_size;
 * In extent_sort_fix_overlapping(), insert_fixup_extent(),
 * extent_merge_inline() - we're modifying keys in place that are packed. To do
 * that we have to unpack the key, modify the unpacked key - then this
 * copies/repacks the unpacked to the original as necessary.
static bool __extent_save(struct btree *b, struct btree_node_iter *iter,
			  struct bkey_packed *dst, struct bkey *src)
	struct bkey_format *f = &b->format;
	struct bkey_i *dst_unpacked;

	if ((dst_unpacked = packed_to_bkey(dst))) {
		dst_unpacked->k = *src;
		ret = bch2_bkey_pack_key(dst, src, f);

		bch2_verify_key_order(b, iter, dst);

static void extent_save(struct btree *b, struct btree_node_iter *iter,
			struct bkey_packed *dst, struct bkey *src)
	BUG_ON(!__extent_save(b, iter, dst, src));
 * If keys compare equal, compare by pointer order:
 * Necessary for sort_fix_overlapping() - if there are multiple keys that
 * compare equal in different sets, we have to process them newest to oldest.
#define extent_sort_cmp(h, l, r)					\
	struct bkey _ul = bkey_unpack_key(b,				\
			__btree_node_offset_to_key(b, (l).k));		\
	struct bkey _ur = bkey_unpack_key(b,				\
			__btree_node_offset_to_key(b, (r).k));		\
	bkey_cmp(bkey_start_pos(&_ul),					\
		 bkey_start_pos(&_ur)) ?: (r).k - (l).k;		\
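/*
 * Ties on start position are broken by bset offset ((r).k - (l).k), so of two
 * keys starting at the same position the more recently written one (the one
 * at the larger node offset) sorts first and wins during the merge below.
 */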
static inline void extent_sort_sift(struct btree_node_iter *iter,
				    struct btree *b, size_t i)
	heap_sift_down(iter, i, extent_sort_cmp);

static inline void extent_sort_next(struct btree_node_iter *iter,
				    struct btree_node_iter_set *i)
	sort_key_next(iter, b, i);
	heap_sift_down(iter, i - iter->data, extent_sort_cmp);

static void extent_sort_append(struct bch_fs *c,
			       struct btree_nr_keys *nr,
			       struct bkey_packed *start,
			       struct bkey_packed **prev,
			       struct bkey_packed *k)
	struct bkey_format *f = &b->format;

	if (bkey_whiteout(k))

	bch2_bkey_unpack(b, &tmp.k, k);

	    bch2_extent_merge(c, b, (void *) *prev, &tmp.k))

		bch2_bkey_pack(*prev, (void *) *prev, f);

		btree_keys_account_key_add(nr, 0, *prev);
		*prev = bkey_next(*prev);

	bkey_copy(*prev, &tmp.k);
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
					struct btree_node_iter *iter)
	struct bkey_format *f = &b->format;
	struct btree_node_iter_set *_l = iter->data, *_r;
	struct bkey_packed *prev = NULL, *out, *lk, *rk;
	struct bkey l_unpacked, r_unpacked;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, extent_sort_cmp);

	while (!bch2_btree_node_iter_end(iter)) {
		lk = __btree_node_offset_to_key(b, _l->k);

		if (iter->used == 1) {
			extent_sort_append(c, b, &nr, dst->start, &prev, lk);
			extent_sort_next(iter, b, _l);

		if (iter->used > 2 &&
		    extent_sort_cmp(iter, _r[0], _r[1]) >= 0)

		rk = __btree_node_offset_to_key(b, _r->k);

		l = __bkey_disassemble(b, lk, &l_unpacked);
		r = __bkey_disassemble(b, rk, &r_unpacked);

		/* If current key and next key don't overlap, just append */
		if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
			extent_sort_append(c, b, &nr, dst->start, &prev, lk);
			extent_sort_next(iter, b, _l);

		/* Skip 0 size keys */
			extent_sort_next(iter, b, _r);

		 * overlap: keep the newer key and trim the older key so they
		 * don't overlap. comparing pointers tells us which one is
		 * newer, since the bsets are appended one after the other.

			/* can't happen because of comparison func */
			BUG_ON(_l->k < _r->k &&
			       !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));

			if (bkey_cmp(l.k->p, r.k->p) >= 0) {
				sort_key_next(iter, b, _r);
				__bch2_cut_front(l.k->p, r);
				extent_save(b, NULL, rk, r.k);

			extent_sort_sift(iter, b, _r - iter->data);
		} else if (bkey_cmp(l.k->p, r.k->p) > 0) {
			 * r wins, but it overlaps in the middle of l - split l:
			bkey_reassemble(&tmp.k, l.s_c);
			bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);

			__bch2_cut_front(r.k->p, l);
			extent_save(b, NULL, lk, l.k);

			extent_sort_sift(iter, b, 0);

			extent_sort_append(c, b, &nr, dst->start, &prev,
					   bkey_to_packed(&tmp.k));

			bch2_cut_back(bkey_start_pos(r.k), l.k);
			extent_save(b, NULL, lk, l.k);

		bch2_bkey_pack(prev, (void *) prev, f);
		btree_keys_account_key_add(&nr, 0, prev);
		out = bkey_next(prev);

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
struct extent_insert_state {
	struct btree_insert		*trans;
	struct btree_insert_entry	*insert;
	struct bpos			committed;
	struct bch_fs_usage		stats;

	struct bkey_i			whiteout;
static void bch2_add_sectors(struct extent_insert_state *s,
			     struct bkey_s_c k, u64 offset, s64 sectors)
	struct bch_fs *c = s->trans->c;
	struct btree *b = s->insert->iter->l[0].b;

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);

	bch2_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
		      &s->stats, s->trans->journal_res.seq, 0);

static void bch2_subtract_sectors(struct extent_insert_state *s,
				  struct bkey_s_c k, u64 offset, s64 sectors)
	bch2_add_sectors(s, k, offset, -sectors);

/* These wrappers subtract exactly the sectors that we're removing from @k */
static void bch2_cut_subtract_back(struct extent_insert_state *s,
				   struct bpos where, struct bkey_s k)
	bch2_subtract_sectors(s, k.s_c, where.offset,
			      k.k->p.offset - where.offset);
	bch2_cut_back(where, k.k);

static void bch2_cut_subtract_front(struct extent_insert_state *s,
				    struct bpos where, struct bkey_s k)
	bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
			      where.offset - bkey_start_offset(k.k));
	__bch2_cut_front(where, k);

static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
	bch2_subtract_sectors(s, k.s_c,
			      bkey_start_offset(k.k), k.k->size);

	__set_bkey_deleted(k.k);
static bool bch2_extent_merge_inline(struct bch_fs *,
				     struct btree_iter *,
				     struct bkey_packed *,
				     struct bkey_packed *,

#define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC)
static enum btree_insert_ret
extent_insert_should_stop(struct extent_insert_state *s)
	struct btree *b = s->insert->iter->l[0].b;

	 * Check if we have sufficient space in both the btree node and the
	 * journal reservation:
	 * Each insert checks for room in the journal entry, but we check for
	 * room in the btree node up-front. In the worst case, bkey_cmpxchg()
	 * will insert two keys, and one iteration of this loop will insert one
	 * key, so we need room for three keys.
	if (!bch2_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
		return BTREE_INSERT_BTREE_NODE_FULL;
	else if (!journal_res_insert_fits(s->trans, s->insert))
		return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */

		return BTREE_INSERT_OK;
static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
			       struct bkey_i *insert)
	struct btree_iter_level *l = &iter->l[0];
	struct bset_tree *t = bset_tree_last(l->b);
	struct bkey_packed *where =
		bch2_btree_node_iter_bset_pos(&l->iter, l->b, t);
	struct bkey_packed *prev = bch2_bkey_prev(l->b, t, where);
	struct bkey_packed *next_live_key = where;
	unsigned clobber_u64s;

		where = bkey_next(prev);

	while (next_live_key != btree_bkey_last(l->b, t) &&
	       bkey_deleted(next_live_key))
		next_live_key = bkey_next(next_live_key);

	 * Everything between where and next_live_key is now deleted keys, and
	clobber_u64s = (u64 *) next_live_key - (u64 *) where;

	    bch2_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
		goto drop_deleted_keys;

	if (next_live_key != btree_bkey_last(l->b, t) &&
	    bch2_extent_merge_inline(c, iter, bkey_to_packed(insert),
				     next_live_key, false))
		goto drop_deleted_keys;

	bch2_bset_insert(l->b, &l->iter, where, insert, clobber_u64s);
	bch2_btree_node_iter_fix(iter, l->b, &l->iter, t, where,
				 clobber_u64s, where->u64s);

	bch2_bset_delete(l->b, where, clobber_u64s);
	bch2_btree_node_iter_fix(iter, l->b, &l->iter, t,
				 where, clobber_u64s, 0);
static void extent_insert_committed(struct extent_insert_state *s)
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct bkey_i *insert = !s->deleting
	BKEY_PADDED(k) split;

	EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
	EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);

	if (!bkey_cmp(s->committed, bkey_start_pos(&insert->k)))

	if (s->deleting && !s->do_journal) {
		bch2_cut_front(s->committed, insert);

	EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);

	bkey_copy(&split.k, insert);

	if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY) &&
	    bkey_cmp(s->committed, insert->k.p) &&
	    bch2_extent_is_compressed(bkey_i_to_s_c(insert))) {
		/* XXX: possibly need to increase our reservation? */
		bch2_cut_subtract_back(s, s->committed,
				       bkey_i_to_s(&split.k));
		bch2_cut_front(s->committed, insert);
		bch2_add_sectors(s, bkey_i_to_s_c(insert),
				 bkey_start_offset(&insert->k),
		bch2_cut_back(s->committed, &split.k.k);
		bch2_cut_front(s->committed, insert);

	if (debug_check_bkeys(c))
		bch2_bkey_debugcheck(c, iter->l[0].b, bkey_i_to_s_c(&split.k));

	bch2_btree_journal_key(s->trans, iter, &split.k);

	extent_bset_insert(c, iter, &split.k);

	bch2_btree_iter_set_pos_same_leaf(iter, s->committed);

	insert->k.needs_whiteout = false;
	s->do_journal = false;
	s->trans->did_work = true;
static enum btree_insert_ret
__extent_insert_advance_pos(struct extent_insert_state *s,
			    struct bpos next_pos,
	struct extent_insert_hook *hook = s->trans->hook;
	enum btree_insert_ret ret;

		ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
		ret = BTREE_INSERT_OK;

	EBUG_ON(bkey_deleted(&s->insert->k->k) || !s->insert->k->k.size);

	if (ret == BTREE_INSERT_OK)
		s->committed = next_pos;
 * Update iter->pos, marking how much of @insert we've processed, and call hook
static enum btree_insert_ret
extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
	struct btree *b = s->insert->iter->l[0].b;
	struct bpos next_pos = bpos_min(s->insert->k->k.p,
					k.k ? k.k->p : b->key.k.p);
	enum btree_insert_ret ret;

		return BTREE_INSERT_NEED_TRAVERSE;

	if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
		ret = __extent_insert_advance_pos(s, bkey_start_pos(k.k),
		if (ret != BTREE_INSERT_OK)

	/* avoid redundant calls to hook fn: */
	if (!bkey_cmp(s->committed, next_pos))
		return BTREE_INSERT_OK;

	return __extent_insert_advance_pos(s, next_pos, k);
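/*
 * Splitting an extent in the middle leaves both halves referencing the full
 * compressed on-disk payload, so the split can consume more space than the
 * original key accounted for; below we take an extra disk reservation (the
 * compressed sectors times the number of dirty pointers) before allowing the
 * overwrite to proceed.
 */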
static enum btree_insert_ret
extent_insert_check_split_compressed(struct extent_insert_state *s,
				     enum bch_extent_overlap overlap)
	struct bch_fs *c = s->trans->c;

	if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
	    (sectors = bch2_extent_is_compressed(k))) {
		int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;

		if (s->trans->flags & BTREE_INSERT_NOFAIL)
			flags |= BCH_DISK_RESERVATION_NOFAIL;

		switch (bch2_disk_reservation_add(c,
				sectors * bch2_extent_nr_dirty_ptrs(k),
			return BTREE_INSERT_ENOSPC;

			return BTREE_INSERT_NEED_GC_LOCK;

	return BTREE_INSERT_OK;
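/*
 * The four overlap cases handled below, with @insert drawn above the existing
 * key @k:
 *
 *   FRONT:  insert |------|            BACK:   insert       |------|
 *           k          |--------|      k       |--------|
 *
 *   ALL:    insert |------------|      MIDDLE: insert     |---|
 *           k          |-----|         k       |---------------|
 */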
static enum btree_insert_ret
extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
	      struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
	      enum bch_extent_overlap overlap)
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	enum btree_insert_ret ret;

	case BCH_EXTENT_OVERLAP_FRONT:
		/* insert overlaps with start of k: */
		bch2_cut_subtract_front(s, insert->k.p, k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(b, node_iter, _k, k.k);

	case BCH_EXTENT_OVERLAP_BACK:
		/* insert overlaps with end of k: */
		bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(b, node_iter, _k, k.k);

		 * As the auxiliary tree is indexed by the end of the
		 * key and we've just changed the end, update the
		bch2_bset_fix_invalidated_key(b, t, _k);
		bch2_btree_node_iter_fix(iter, b, node_iter, t,
					 _k, _k->u64s, _k->u64s);

	case BCH_EXTENT_OVERLAP_ALL: {
		struct bpos orig_pos = k.k->p;

		/* The insert key completely covers k, invalidate k */
		if (!bkey_whiteout(k.k))
			btree_keys_account_key_drop(&b->nr,

		bch2_drop_subtract(s, k);
		k.k->p = bkey_start_pos(&insert->k);
		if (!__extent_save(b, node_iter, _k, k.k)) {
			 * Couldn't repack: we aren't necessarily able
			 * to repack if the new key is outside the range
			 * of the old extent, so we have to split
			extent_save(b, node_iter, _k, k.k);

			ret = extent_insert_advance_pos(s, k.s_c);
			if (ret != BTREE_INSERT_OK)

			extent_insert_committed(s);
			 * We split and inserted up to k.k->p - that
			 * has to coincide with iter->pos, so that we
			 * don't have anything more we have to insert
			 * until we recheck our journal reservation:
			EBUG_ON(bkey_cmp(s->committed, k.k->p));

			bch2_bset_fix_invalidated_key(b, t, _k);
			bch2_btree_node_iter_fix(iter, b, node_iter, t,
						 _k, _k->u64s, _k->u64s);

	case BCH_EXTENT_OVERLAP_MIDDLE: {
		BKEY_PADDED(k) split;

		 * The insert key falls 'in the middle' of k
		 * The insert key splits k in 3:
		 * - start only in k, preserve
		 * - middle common section, invalidate in k
		 * - end only in k, preserve
		 * We update the old key to preserve the start,
		 * insert will be the new common section,
		 * we manually insert the end that we are preserving.
		 * modify k _before_ doing the insert (which will move
		bkey_reassemble(&split.k, k.s_c);
		split.k.k.needs_whiteout |= bset_written(b, bset(b, t));

		bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
		BUG_ON(bkey_deleted(&split.k.k));

		bch2_cut_subtract_front(s, insert->k.p, k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(b, node_iter, _k, k.k);

		bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
				 bkey_start_offset(&split.k.k),
		extent_bset_insert(c, iter, &split.k);

	return BTREE_INSERT_OK;
static enum btree_insert_ret
bch2_delete_fixup_extent(struct extent_insert_state *s)
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	struct bkey_packed *_k;
	struct bkey unpacked;
	struct bkey_i *insert = s->insert->k;
	enum btree_insert_ret ret = BTREE_INSERT_OK;

	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));

	s->whiteout = *insert;
	s->do_journal = false;

	while (bkey_cmp(s->committed, insert->k.p) < 0 &&
	       (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
	       (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
		struct bset_tree *t = bch2_bkey_to_bset(b, _k);
		struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
		enum bch_extent_overlap overlap;

		EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
		EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);

		if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)

		if (bkey_whiteout(k.k)) {
			s->committed = bpos_min(insert->k.p, k.k->p);

		overlap = bch2_extent_overlap(&insert->k, k.k);

		ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
		if (ret != BTREE_INSERT_OK)

		ret = extent_insert_advance_pos(s, k.s_c);

		s->do_journal = true;

		if (overlap == BCH_EXTENT_OVERLAP_ALL) {
			btree_keys_account_key_drop(&b->nr,
			bch2_subtract_sectors(s, k.s_c,
					      bkey_start_offset(k.k), k.k->size);
			_k->type = KEY_TYPE_DISCARD;
			reserve_whiteout(b, t, _k);
		} else if (k.k->needs_whiteout ||
			   bset_written(b, bset(b, t))) {
			struct bkey_i discard = *insert;

			case BCH_EXTENT_OVERLAP_FRONT:
				bch2_cut_front(bkey_start_pos(k.k), &discard);
			case BCH_EXTENT_OVERLAP_BACK:
				bch2_cut_back(k.k->p, &discard.k);

			discard.k.needs_whiteout = true;

			ret = extent_squash(s, insert, t, _k, k, overlap);
			BUG_ON(ret != BTREE_INSERT_OK);

			extent_bset_insert(c, iter, &discard);
			ret = extent_squash(s, insert, t, _k, k, overlap);
			BUG_ON(ret != BTREE_INSERT_OK);

		bch2_cut_front(s->committed, insert);
		bch2_btree_iter_set_pos_same_leaf(iter, s->committed);

	if (ret == BTREE_INSERT_OK &&
	    bkey_cmp(s->committed, insert->k.p) < 0)
		ret = extent_insert_advance_pos(s, bkey_s_c_null);

	extent_insert_committed(s);

	bch2_fs_usage_apply(c, &s->stats, s->trans->disk_res,
			    gc_pos_btree_node(b));

	EBUG_ON(bkey_cmp(iter->pos, s->committed));
	EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
		!!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));

	bch2_cut_front(iter->pos, insert);

	if (insert->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
		ret = BTREE_INSERT_NEED_TRAVERSE;

	EBUG_ON(insert->k.size && ret == BTREE_INSERT_OK);
 * bch2_insert_fixup_extent - insert a new extent and deal with overlaps
 * this may result in not actually doing the insert, or inserting some subset
 * of the insert key. For cmpxchg operations this is where that logic lives.
 * All subsets of @insert that need to be inserted are inserted using
 * bch2_btree_insert_and_journal(). If @b or @res fills up, this function
 * returns false, setting @iter->pos for the prefix of @insert that actually got
 * BSET INVARIANTS: this function is responsible for maintaining all the
 * invariants for bsets of extents in memory. things get really hairy with 0
 *	bkey_start_pos(bkey_next(k)) >= k
 * or	bkey_start_offset(bkey_next(k)) >= k->offset
 * i.e. strict ordering, no overlapping extents.
 * multiple bsets (i.e. full btree node):
 *		k.size != 0 ∧ j.size != 0 →
 *		¬ (k > bkey_start_pos(j) ∧ k < j)
 * i.e. no two overlapping keys _of nonzero size_
 * We can't realistically maintain this invariant for zero size keys because of
 * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
 * there may be another 0 size key between them in another bset, and it will
 * thus overlap with the merged key.
 * In addition, the end of iter->pos indicates how much has been processed.
 * If the end of iter->pos is not the same as the end of insert, then
 * key insertion needs to continue/be retried.
enum btree_insert_ret
bch2_insert_fixup_extent(struct btree_insert *trans,
			 struct btree_insert_entry *insert)
	struct bch_fs *c = trans->c;
	struct btree_iter *iter = insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	struct bkey_packed *_k;
	struct bkey unpacked;
	enum btree_insert_ret ret = BTREE_INSERT_OK;

	struct extent_insert_state s = {
		.committed	= insert->iter->pos,
		.deleting	= bkey_whiteout(&insert->k->k),

	EBUG_ON(iter->level);
	EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);

		return bch2_delete_fixup_extent(&s);

	 * As we process overlapping extents, we advance @iter->pos both to
	 * signal to our caller (btree_insert_key()) how much of @insert->k has
	 * been inserted, and also to keep @iter->pos consistent with
	 * @insert->k and the node iterator that we're advancing:
	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));

	if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
		bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
				 bkey_start_offset(&insert->k->k),

	while (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
	       (ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK &&
	       (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
		struct bset_tree *t = bch2_bkey_to_bset(b, _k);
		struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
		enum bch_extent_overlap overlap;

		EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
		EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);

		if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)

		overlap = bch2_extent_overlap(&insert->k->k, k.k);

		ret = extent_insert_check_split_compressed(&s, k.s_c, overlap);
		if (ret != BTREE_INSERT_OK)

		 * Only call advance pos & call hook for nonzero size extents:
		ret = extent_insert_advance_pos(&s, k.s_c);
		if (ret != BTREE_INSERT_OK)

		    (k.k->needs_whiteout || bset_written(b, bset(b, t))))
			insert->k->k.needs_whiteout = true;

		if (overlap == BCH_EXTENT_OVERLAP_ALL &&
		    bkey_whiteout(k.k) &&
		    k.k->needs_whiteout) {
			unreserve_whiteout(b, t, _k);
			_k->needs_whiteout = false;

		ret = extent_squash(&s, insert->k, t, _k, k, overlap);
		if (ret != BTREE_INSERT_OK)

	if (ret == BTREE_INSERT_OK &&
	    bkey_cmp(s.committed, insert->k->k.p) < 0)
		ret = extent_insert_advance_pos(&s, bkey_s_c_null);

	extent_insert_committed(&s);

	 * Subtract any remaining sectors from @insert, if we bailed out early
	 * and didn't fully insert @insert:
	if (insert->k->k.size &&
	    !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
		bch2_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
				      bkey_start_offset(&insert->k->k),

	bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
			    gc_pos_btree_node(b));

	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
	EBUG_ON(bkey_cmp(iter->pos, s.committed));
	EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
		!!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));

	if (insert->k->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
		ret = BTREE_INSERT_NEED_TRAVERSE;

	EBUG_ON(insert->k->k.size && ret == BTREE_INSERT_OK);
static const char *bch2_extent_invalid(const struct bch_fs *c,
	if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
		return "value too big";

		return "zero key size";

	switch (k.k->type) {
	case BCH_EXTENT_CACHED: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;
		struct bch_extent_crc_unpacked crc;
		const struct bch_extent_ptr *ptr;
		unsigned size_ondisk = e.k->size;
		unsigned nonce = UINT_MAX;

		extent_for_each_entry(e, entry) {
			if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
				return "invalid extent entry type";

			if (extent_entry_is_crc(entry)) {
				crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));

				if (crc.offset + e.k->size >
				    crc.uncompressed_size)
					return "checksum offset + key size > uncompressed size";

				size_ondisk = crc.compressed_size;

				if (!bch2_checksum_type_valid(c, crc.csum_type))
					return "invalid checksum type";

				if (crc.compression_type >= BCH_COMPRESSION_NR)
					return "invalid compression type";

				if (bch2_csum_type_is_encryption(crc.csum_type)) {
					if (nonce == UINT_MAX)
						nonce = crc.offset + crc.nonce;
					else if (nonce != crc.offset + crc.nonce)
						return "incorrect nonce";

				ptr = entry_to_ptr(entry);

				reason = extent_ptr_invalid(c, e, &entry->ptr,
							    size_ondisk, false);

	case BCH_RESERVATION: {
		struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

		if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
			return "incorrect value size";

		if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
			return "invalid nr_replicas";

		return "invalid value type";
static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
					  struct bkey_s_c_extent e)
	const struct bch_extent_ptr *ptr;
	struct bucket_mark mark;
	unsigned seq, stale;
	unsigned ptrs_per_tier[BCH_TIER_MAX];
	unsigned replicas = 0;

	 * XXX: we should be doing most/all of these checks at startup time,
	 * where we check bch2_bkey_invalid() in btree_node_read_done()
	 * But note that we can't check for stale pointers or incorrect gc marks
	 * until after journal replay is done (it might be an extent that's
	 * going to get overwritten during replay)

	memset(ptrs_per_tier, 0, sizeof(ptrs_per_tier));

	extent_for_each_ptr(e, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		ptrs_per_tier[ca->mi.tier]++;

		 * If journal replay hasn't finished, we might be seeing keys
		 * that will be overwritten by the time journal replay is done:
		if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))

			seq = read_seqcount_begin(&c->gc_pos_lock);
			mark = ptr_bucket_mark(ca, ptr);

			/* between mark and bucket gen */

			stale = ptr_stale(ca, ptr);

			bch2_fs_bug_on(stale && !ptr->cached, c,
				       "stale dirty pointer");

			bch2_fs_bug_on(stale > 96, c,
				       "key too stale: %i",

			bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
				(mark.data_type != BCH_DATA_USER ||
				 ? mark.cached_sectors
				 : mark.dirty_sectors));
		} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	if (replicas > BCH_REPLICAS_MAX) {
		bch2_bkey_val_to_text(c, btree_node_type(b), buf,
				      sizeof(buf), e.s_c);
			"extent key bad (too many replicas: %u): %s",

	if (!bkey_extent_is_cached(e.k) &&
	    !bch2_bkey_replicas_marked(c, BCH_DATA_USER, e.s_c)) {
		bch2_bkey_val_to_text(c, btree_node_type(b),
				      buf, sizeof(buf), e.s_c);
			"extent key bad (replicas not marked in superblock):\n%s",

	bch2_bkey_val_to_text(c, btree_node_type(b), buf,
			      sizeof(buf), e.s_c);
	bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
		    "gen %i type %u", buf,
		    PTR_BUCKET_NR(ca, ptr), mark.gen, mark.data_type);
static void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b,
	switch (k.k->type) {
	case BCH_EXTENT_CACHED:
		bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
	case BCH_RESERVATION:

static void bch2_extent_to_text(struct bch_fs *c, char *buf,
				size_t size, struct bkey_s_c k)
	char *out = buf, *end = buf + size;
	const char *invalid;

#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))

	if (bkey_extent_is_data(k.k))
		out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));

	invalid = bch2_extent_invalid(c, k);
		p(" invalid: %s", invalid);

static unsigned PTR_TIER(struct bch_fs *c,
			 const struct bch_extent_ptr *ptr)
	return bch_dev_bkey_exists(c, ptr->dev)->mi.tier;
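/*
 * Initialize the smallest crc entry variant that can represent @new: crc32 if
 * the checksum fits in 32 bits and the sizes and nonce fit their narrow
 * fields, otherwise crc64, otherwise crc128.
 */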
static void bch2_extent_crc_init(union bch_extent_crc *crc,
				 struct bch_extent_crc_unpacked new)
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		._compressed_size	= _crc.compressed_size - 1,	\
		._uncompressed_size	= _crc.uncompressed_size - 1,	\
		.offset			= _crc.offset

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX) {
		crc->crc32 = (struct bch_extent_crc32) {
			.type = 1 << BCH_EXTENT_ENTRY_crc32,
			.csum = *((__le32 *) &new.csum.lo),

	if (bch_crc_bytes[new.csum_type] <= 10 &&
	    new.uncompressed_size <= CRC64_SIZE_MAX &&
	    new.nonce <= CRC64_NONCE_MAX) {
		crc->crc64 = (struct bch_extent_crc64) {
			.type = 1 << BCH_EXTENT_ENTRY_crc64,
			.csum_lo = new.csum.lo,
			.csum_hi = *((__le16 *) &new.csum.hi),

	if (bch_crc_bytes[new.csum_type] <= 16 &&
	    new.uncompressed_size <= CRC128_SIZE_MAX &&
	    new.nonce <= CRC128_NONCE_MAX) {
		crc->crc128 = (struct bch_extent_crc128) {
			.type = 1 << BCH_EXTENT_ENTRY_crc128,

#undef common_fields
void bch2_extent_crc_append(struct bkey_i_extent *e,
			    struct bch_extent_crc_unpacked new)
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	BUG_ON(new.compressed_size > new.uncompressed_size);
	BUG_ON(new.live_size != e->k.size);
	BUG_ON(!new.compressed_size || !new.uncompressed_size);

	 * Look up the last crc entry, so we can check if we need to add
	extent_for_each_crc(extent_i_to_s(e), crc, i)

	if (!memcmp(&crc, &new, sizeof(crc)))

	bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
	__extent_entry_push(e);
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 * Returns true if @k should be dropped entirely
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
	struct bkey_s_extent e;

	switch (k.k->type) {
	case KEY_TYPE_ERROR:

	case KEY_TYPE_DELETED:
	case KEY_TYPE_COOKIE:

	case KEY_TYPE_DISCARD:
		return bversion_zero(k.k->version);

	case BCH_EXTENT_CACHED:
		e = bkey_s_to_extent(k);

		bch2_extent_drop_stale(c, e);

		if (!bkey_val_u64s(e.k)) {
			if (bkey_extent_is_cached(e.k)) {
				k.k->type = KEY_TYPE_DISCARD;
				if (bversion_zero(k.k->version))
				k.k->type = KEY_TYPE_ERROR;

	case BCH_RESERVATION:
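/*
 * If an extent has more good (dirty, non-failed) pointers than the desired
 * number of replicas, mark the excess nr_good - nr_desired_replicas pointers
 * cached, starting with pointers on the lowest tier and working upwards.
 */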
void bch2_extent_mark_replicas_cached(struct bch_fs *c,
				      struct bkey_s_extent e,
				      unsigned nr_desired_replicas)
	struct bch_extent_ptr *ptr;
	unsigned tier = 0, nr_cached = 0;
	unsigned nr_good = bch2_extent_nr_good_ptrs(c, e.c);
	bool have_higher_tier;

	if (nr_good <= nr_desired_replicas)

	nr_cached = nr_good - nr_desired_replicas;

		have_higher_tier = false;

		extent_for_each_ptr(e, ptr) {
			    PTR_TIER(c, ptr) == tier) {

			if (PTR_TIER(c, ptr) > tier)
				have_higher_tier = true;

	} while (have_higher_tier);
 * This picks a non-stale pointer, preferably from a device other than
 * avoid. Avoid can be NULL, meaning pick any. If there are no non-stale
 * pointers to other devices, it will still pick a pointer from avoid.
 * Note that it prefers lower-numbered pointers to higher-numbered pointers
 * as the pointers are sorted by tier, hence preferring pointers to tier 0
 * rather than pointers to tier 1.
void bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
			  struct bch_devs_mask *avoid,
			  struct extent_pick_ptr *ret)
	struct bkey_s_c_extent e;

	switch (k.k->type) {
	case KEY_TYPE_DELETED:
	case KEY_TYPE_DISCARD:
	case KEY_TYPE_COOKIE:

	case KEY_TYPE_ERROR:
		ret->ca = ERR_PTR(-EIO);

	case BCH_EXTENT_CACHED:
		e = bkey_s_c_to_extent(k);

		extent_pick_read_device(c, bkey_s_c_to_extent(k), avoid, ret);

		if (!ret->ca && !bkey_extent_is_cached(e.k))
			ret->ca = ERR_PTR(-EIO);

	case BCH_RESERVATION:
static enum merge_result bch2_extent_merge(struct bch_fs *c,
					   struct bkey_i *l, struct bkey_i *r)
	struct bkey_s_extent el, er;
	union bch_extent_entry *en_l, *en_r;

	if (key_merging_disabled(c))
		return BCH_MERGE_NOMERGE;

	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	if (l->k.u64s != r->k.u64s ||
	    l->k.type != r->k.type ||
	    bversion_cmp(l->k.version, r->k.version) ||
	    bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
		return BCH_MERGE_NOMERGE;

	switch (l->k.type) {
	case KEY_TYPE_DELETED:
	case KEY_TYPE_DISCARD:
	case KEY_TYPE_ERROR:
		/* These types are mergeable, and no val to check */

	case BCH_EXTENT_CACHED:
		el = bkey_i_to_s_extent(l);
		er = bkey_i_to_s_extent(r);

		extent_for_each_entry(el, en_l) {
			struct bch_extent_ptr *lp, *rp;

			en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);

			if ((extent_entry_type(en_l) !=
			     extent_entry_type(en_r)) ||
			    extent_entry_is_crc(en_l))
				return BCH_MERGE_NOMERGE;

			if (lp->offset + el.k->size != rp->offset ||
			    lp->dev != rp->dev ||
				return BCH_MERGE_NOMERGE;

			/* We don't allow extents to straddle buckets: */
			ca = bch_dev_bkey_exists(c, lp->dev);

			if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
				return BCH_MERGE_NOMERGE;

	case BCH_RESERVATION: {
		struct bkey_i_reservation *li = bkey_i_to_reservation(l);
		struct bkey_i_reservation *ri = bkey_i_to_reservation(r);

		if (li->v.generation != ri->v.generation ||
		    li->v.nr_replicas != ri->v.nr_replicas)
			return BCH_MERGE_NOMERGE;

		return BCH_MERGE_NOMERGE;

	l->k.needs_whiteout |= r->k.needs_whiteout;

	/* Keys with no pointers aren't restricted to one bucket and could
	if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
		bch2_key_resize(&l->k, KEY_SIZE_MAX);
		bch2_cut_front(l->k.p, r);
		return BCH_MERGE_PARTIAL;

	bch2_key_resize(&l->k, l->k.size + r->k.size);

	return BCH_MERGE_MERGE;
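/*
 * Example of the partial merge above: if l spans 40 sectors, r spans 30, and
 * (hypothetically) KEY_SIZE_MAX were 64, l would be resized to 64 sectors, r
 * would have its front cut so it starts where l now ends, and
 * BCH_MERGE_PARTIAL tells the caller that both keys are still live.
 */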
static void extent_i_save(struct btree *b, struct bkey_packed *dst,
	struct bkey_format *f = &b->format;
	struct bkey_i *dst_unpacked;

	BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k));

	 * We don't want the bch2_verify_key_order() call in extent_save(),
	 * because we may be out of order with deleted keys that are about to be
	 * removed by extent_bset_insert()
	if ((dst_unpacked = packed_to_bkey(dst)))
		bkey_copy(dst_unpacked, src);
		BUG_ON(!bch2_bkey_pack(dst, src, f));
static bool extent_merge_one_overlapping(struct btree_iter *iter,
					 struct bpos new_pos,
					 struct bset_tree *t,
					 struct bkey_packed *k, struct bkey uk,
					 bool check, bool could_pack)
	struct btree_iter_level *l = &iter->l[0];

	BUG_ON(!bkey_deleted(k));

		return !bkey_packed(k) || could_pack;

		extent_save(l->b, &l->iter, k, &uk);
		bch2_bset_fix_invalidated_key(l->b, t, k);
		bch2_btree_node_iter_fix(iter, l->b, &l->iter, t,
					 k, k->u64s, k->u64s);
static bool extent_merge_do_overlapping(struct btree_iter *iter,
					struct bkey *m, bool back_merge)
	struct btree_iter_level *l = &iter->l[0];
	struct btree *b = l->b;
	struct btree_node_iter *node_iter = &l->iter;
	struct bset_tree *t;
	struct bkey_packed *k;
	struct bpos new_pos = back_merge ? m->p : bkey_start_pos(m);
	bool could_pack = bkey_pack_pos((void *) &uk, new_pos, b);

	 * @m is the new merged extent:
	 * The merge took place in the last bset; we know there can't be any 0
	 * size extents overlapping with m there because if so they would have
	 * been between the two extents we merged.
	 * But in the other bsets, we have to check for and fix such extents:
	for_each_bset(b, t) {
		if (t == bset_tree_last(b))

		 * if we don't find this bset in the iterator we already got to
		 * the end of that bset, so start searching from the end.
		k = bch2_btree_node_iter_bset_pos(node_iter, b, t);

		if (k == btree_bkey_last(b, t))
			k = bch2_bkey_prev_all(b, t, k);

			 * Back merge: 0 size extents will be before the key
			 * that was just inserted (and thus the iterator
			 * position) - walk backwards to find them
			     (uk = bkey_unpack_key(b, k),
			      bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
			     k = bch2_bkey_prev_all(b, t, k)) {
				if (bkey_cmp(uk.p, m->p) >= 0)

				if (!extent_merge_one_overlapping(iter, new_pos,
						t, k, uk, check, could_pack))

		/* Front merge - walk forwards */
			     k != btree_bkey_last(b, t) &&
			     (uk = bkey_unpack_key(b, k),
			      bkey_cmp(uk.p, m->p) < 0);
					 bkey_start_pos(m)) <= 0)

				if (!extent_merge_one_overlapping(iter, new_pos,
						t, k, uk, check, could_pack))
 * When merging an extent that we're inserting into a btree node, the new merged
 * extent could overlap with an existing 0 size extent - if we don't fix that,
 * it'll break the btree node iterator so this code finds those 0 size extents
 * and shifts them out of the way.
 * Also unpacks and repacks.
static bool bch2_extent_merge_inline(struct bch_fs *c,
				     struct btree_iter *iter,
				     struct bkey_packed *l,
				     struct bkey_packed *r,
	struct btree *b = iter->l[0].b;
	struct btree_node_iter *node_iter = &iter->l[0].iter;
	const struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed *m;
	 * We need to save copies of both l and r, because we might get a
	 * partial merge (which modifies both) and then fail to repack
	bch2_bkey_unpack(b, &li.k, l);
	bch2_bkey_unpack(b, &ri.k, r);

	m = back_merge ? l : r;
	mi = back_merge ? &li.k : &ri.k;

	/* l & r should be in last bset: */
	EBUG_ON(bch2_bkey_to_bset(b, m) != t);

	switch (bch2_extent_merge(c, b, &li.k, &ri.k)) {
	case BCH_MERGE_NOMERGE:

	case BCH_MERGE_PARTIAL:
		if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &mi->k, f))

		if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))

		extent_i_save(b, m, mi);
		bch2_bset_fix_invalidated_key(b, t, m);

		 * Update iterator to reflect what we just inserted - otherwise,
		 * the iter_fix() call is going to put us _before_ the key we
		 * just partially merged with:
			bch2_btree_iter_set_pos_same_leaf(iter, li.k.k.p);

		bch2_btree_node_iter_fix(iter, b, node_iter,
					 t, m, m->u64s, m->u64s);

			bkey_copy(packed_to_bkey(l), &li.k);
			bkey_copy(packed_to_bkey(r), &ri.k);

	case BCH_MERGE_MERGE:
		if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &li.k.k, f))

		if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))

		extent_i_save(b, m, &li.k);
		bch2_bset_fix_invalidated_key(b, t, m);

		bch2_btree_node_iter_fix(iter, b, node_iter,
					 t, m, m->u64s, m->u64s);
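/*
 * Scan the extents btree over [pos, pos + size) and check whether every slot
 * in that range is a fully allocated extent; the loop below stops early as
 * soon as it finds one that isn't.
 */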
int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size)
	struct btree_iter iter;
	struct bpos end = pos;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
			   BTREE_ITER_SLOTS, k) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)

		if (!bch2_extent_is_fully_allocated(k)) {

	bch2_btree_iter_unlock(&iter);
const struct bkey_ops bch2_bkey_extent_ops = {
	.key_invalid	= bch2_extent_invalid,
	.key_debugcheck	= bch2_extent_debugcheck,
	.val_to_text	= bch2_extent_to_text,
	.swab		= bch2_ptr_swab,
	.key_normalize	= bch2_ptr_normalize,
	.key_merge	= bch2_extent_merge,