// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
*
* Code for managing the extent btree and dynamically updating the writeback
* dirty sector count.
*/
#include "bkey_methods.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "disk_groups.h"
#include <trace/events/bcachefs.h>
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr)
unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
case KEY_TYPE_btree_ptr:
case KEY_TYPE_reflink_v: {
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr)
nr_ptrs += !ptr->cached;
case KEY_TYPE_reservation:
nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
struct extent_ptr_decoded p)
unsigned i, durability = 0;
ca = bch_dev_bkey_exists(c, p.ptr.dev);
if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
durability = max_t(unsigned, durability, ca->mi.durability);
for (i = 0; i < p.ec_nr; i++) {
genradix_ptr(&c->stripes[0], p.idx);
durability = max_t(unsigned, durability, s->nr_redundant);
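/*
* Worked example (editor's note, not from the original source): with the
* per-pointer logic above, a non-cached pointer on a healthy device with
* mi.durability == 1 contributes 1; if that pointer also belongs to a stripe
* with nr_redundant == 2, the max() makes it contribute 2.
* bch2_bkey_durability() below sums these per-pointer values, so a key with
* two plain replicas of durability 1 each reports durability 2.
*/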
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned durability = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
durability += bch2_extent_ptr_durability(c, p);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
struct bch_dev_io_failures *i;
for (i = f->devs; i < f->devs + f->nr; i++)
void bch2_mark_io_failure(struct bch_io_failures *failed,
struct extent_ptr_decoded *p)
struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
f = &failed->devs[failed->nr++];
} else if (p->idx != f->idx) {
* returns true if p1 is better than p2:
static inline bool ptr_better(struct bch_fs *c,
const struct extent_ptr_decoded p1,
const struct extent_ptr_decoded p2)
if (likely(!p1.idx && !p2.idx)) {
struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
/* Pick at random, biased in favor of the faster device: */
return bch2_rand_range(l1 + l2) > l1;
if (force_reconstruct_read(c))
return p1.idx > p2.idx;
return p1.idx < p2.idx;
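/*
* Worked example (editor's note): suppose p1's device has cur_latency[READ]
* l1 = 1000 and p2's has l2 = 3000 (same units). bch2_rand_range(l1 + l2) is
* uniform over [0, 4000), so the comparison above reports p1 as better with
* probability roughly 3/4: reads are biased toward, but not pinned to, the
* lower-latency device.
*/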
* This picks a non-stale pointer, preferably from a device that hasn't seen
* IO failures. @failed tracks those failures and may be NULL, meaning pick
* any pointer. If there are no non-stale pointers on other devices, it will
* still pick a pointer from a failed device.
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
struct bch_io_failures *failed,
struct extent_ptr_decoded *pick)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
struct bch_dev_io_failures *f;
if (k.k->type == KEY_TYPE_error)
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
ca = bch_dev_bkey_exists(c, p.ptr.dev);
* If there are any dirty pointers it's an error if we can't
if (!ret && !p.ptr.cached)
if (p.ptr.cached && ptr_stale(ca, &p.ptr))
f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
p.idx = f->nr_failed < f->nr_retries
!bch2_dev_is_readable(ca))
if (force_reconstruct_read(c) &&
if (p.idx >= p.ec_nr + 1)
if (ret > 0 && !ptr_better(c, p, *pick))
void bch2_bkey_append_ptr(struct bkey_i *k,
struct bch_extent_ptr ptr)
EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
case KEY_TYPE_btree_ptr:
case KEY_TYPE_extent:
EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
memcpy((void *) &k->v + bkey_val_bytes(&k->k),
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
struct bch_extent_ptr *ptr;
bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr)
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_in_target(c, ptr->dev, target) &&
!ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
/* extent specific utility code */
const struct bch_extent_ptr *
bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
const struct bch_extent_ptr *ptr;
extent_for_each_ptr(e, ptr)
const struct bch_extent_ptr *
bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
const struct bch_extent_ptr *ptr;
extent_for_each_ptr(e, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
ca->mi.group - 1 == group)
unsigned bch2_extent_is_compressed(struct bkey_s_c k)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
p.crc.compression_type != BCH_COMPRESSION_NONE)
ret += p.crc.compressed_size;
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
struct bch_extent_ptr m, u64 offset)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (p.ptr.dev == m.dev &&
p.ptr.gen == m.gen &&
(s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
(s64) m.offset - offset)
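/*
* Worked example (editor's note): for a key starting at offset 100 with a
* pointer at device offset 1000 and crc.offset 0, asking about m.offset 1016
* with offset 116 matches, since 1000 + 0 - 100 == 1016 - 116 == 900.
*/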
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
union bch_extent_entry *entry)
union bch_extent_entry *i = ptrs.start;
while (extent_entry_next(i) != entry)
i = extent_entry_next(i);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
struct bch_extent_ptr *ptr)
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *dst, *src, *prev;
bool drop_crc = true;
EBUG_ON(ptr < &ptrs.start->ptr ||
ptr >= &ptrs.end->ptr);
EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
src = extent_entry_next(to_entry(ptr));
if (src != ptrs.end &&
!extent_entry_is_crc(src))
while ((prev = extent_entry_prev(ptrs, dst))) {
if (extent_entry_is_ptr(prev))
if (extent_entry_is_crc(prev)) {
memmove_u64s_down(dst, src,
(u64 *) ptrs.end - (u64 *) src);
k.k->u64s -= (u64 *) src - (u64 *) dst;
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
struct bch_extent_crc_unpacked n)
return !u.compression_type &&
u.uncompressed_size > u.live_size &&
bch2_csum_type_is_encryption(u.csum_type) ==
bch2_csum_type_is_encryption(n.csum_type);
bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
struct bch_extent_crc_unpacked n)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *i;
bkey_for_each_crc(k.k, ptrs, crc, i)
if (can_narrow_crc(crc, n))
/*
* We're writing another replica for this extent, so while we've got the data in
* memory we'll be computing a new checksum for the currently live data.
*
* If there are other replicas we aren't moving, and they are checksummed but
* not compressed, we can modify them to point to only the data that is
* currently live (so that readers won't have to bounce) while we've got the
* checksum we need:
*/
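/*
* Worked example (editor's note): a crc entry with uncompressed_size 128,
* live_size 8 and offset 16 can be narrowed. Once we have a checksum of just
* the 8 live sectors, the pointer below is advanced by crc.offset and the
* entry rewritten, so readers no longer have to read and checksum the 120
* dead sectors around the live data.
*/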
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
struct bch_extent_crc_unpacked u;
struct extent_ptr_decoded p;
union bch_extent_entry *i;
/* Find a checksum entry that covers only live data: */
bkey_for_each_crc(&k->k, ptrs, u, i)
if (!u.compression_type &&
u.live_size == u.uncompressed_size) {
BUG_ON(n.compression_type);
BUG_ON(n.live_size != k->k.size);
restart_narrow_pointers:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
if (can_narrow_crc(p.crc, n)) {
bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
p.ptr.offset += p.crc.offset;
bch2_extent_ptr_decoded_append(k, &p);
goto restart_narrow_pointers;
/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
struct bch_extent_crc_unpacked r)
return (l.csum_type != r.csum_type ||
l.compression_type != r.compression_type ||
l.compressed_size != r.compressed_size ||
l.uncompressed_size != r.uncompressed_size ||
l.offset != r.offset ||
l.live_size != r.live_size ||
l.nonce != r.nonce ||
bch2_crc_cmp(l.csum, r.csum));
void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
union bch_extent_entry *entry;
u64 *d = (u64 *) bkeyp_val(f, k);
for (i = 0; i < bkeyp_val_u64s(f, k); i++)
for (entry = (union bch_extent_entry *) d;
entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
entry = extent_entry_next(entry)) {
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
case BCH_EXTENT_ENTRY_crc32:
entry->crc32.csum = swab32(entry->crc32.csum);
case BCH_EXTENT_ENTRY_crc64:
entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
case BCH_EXTENT_ENTRY_crc128:
entry->crc128.csum.hi = (__force __le64)
swab64((__force u64) entry->crc128.csum.hi);
entry->crc128.csum.lo = (__force __le64)
swab64((__force u64) entry->crc128.csum.lo);
case BCH_EXTENT_ENTRY_stripe_ptr:
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
const struct bch_extent_ptr *ptr;
const struct bch_extent_stripe_ptr *ec;
bkey_extent_entry_for_each(ptrs, entry) {
switch (__extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
ptr = entry_to_ptr(entry);
ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
? bch_dev_bkey_exists(c, ptr->dev)
pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
ptr->cached ? " cached" : "",
ca && ptr_stale(ca, ptr)
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
crc.uncompressed_size,
crc.offset, crc.nonce,
crc.compression_type);
case BCH_EXTENT_ENTRY_stripe_ptr:
ec = &entry->stripe_ptr;
pr_buf(out, "ec: idx %llu block %u",
(u64) ec->idx, ec->block);
pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
static const char *extent_ptr_invalid(const struct bch_fs *c,
const struct bch_extent_ptr *ptr,
unsigned size_ondisk,
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr2;
if (!bch2_dev_exists2(c, ptr->dev))
return "pointer to invalid device";
ca = bch_dev_bkey_exists(c, ptr->dev);
return "pointer to invalid device";
bkey_for_each_ptr(ptrs, ptr2)
if (ptr != ptr2 && ptr->dev == ptr2->dev)
return "multiple pointers to same device";
if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
return "offset past end of device";
if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
return "offset before first bucket";
if (bucket_remainder(ca, ptr->offset) +
size_ondisk > ca->mi.bucket_size)
return "spans multiple buckets";
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
unsigned size_ondisk = k.k->size;
unsigned nonce = UINT_MAX;
if (k.k->type == KEY_TYPE_btree_ptr)
size_ondisk = c->opts.btree_node_size;
bkey_extent_entry_for_each(ptrs, entry) {
if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
return "invalid extent entry type";
if (k.k->type == KEY_TYPE_btree_ptr &&
!extent_entry_is_ptr(entry))
return "has non ptr field";
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
reason = extent_ptr_invalid(c, k, &entry->ptr,
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
if (crc.offset + crc.live_size >
crc.uncompressed_size)
return "checksum offset + key size > uncompressed size";
size_ondisk = crc.compressed_size;
if (!bch2_checksum_type_valid(c, crc.csum_type))
return "invalid checksum type";
if (crc.compression_type >= BCH_COMPRESSION_NR)
return "invalid compression type";
if (bch2_csum_type_is_encryption(crc.csum_type)) {
if (nonce == UINT_MAX)
nonce = crc.offset + crc.nonce;
else if (nonce != crc.offset + crc.nonce)
return "incorrect nonce";
case BCH_EXTENT_ENTRY_stripe_ptr:
const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
return "value too big";
return bch2_bkey_ptrs_invalid(c, k);
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
struct bucket_mark mark;
bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
!bch2_bkey_replicas_marked(c, k, false), c,
"btree key bad (replicas not marked in superblock):\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
bkey_for_each_ptr(ptrs, ptr) {
ca = bch_dev_bkey_exists(c, ptr->dev);
mark = ptr_bucket_mark(ca, ptr);
if (gen_after(mark.gen, ptr->gen))
err = "inconsistent";
if (mark.data_type != BCH_DATA_BTREE ||
mark.dirty_sectors < c->opts.btree_node_size)
bch2_bkey_val_to_text(&PBUF(buf), c, k);
bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
err, buf, PTR_BUCKET_NR(ca, ptr),
mark.gen, (unsigned) mark.v.counter);
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
bch2_bkey_ptrs_to_text(out, c, k);
void __bch2_cut_front(struct bpos where, struct bkey_s k)
if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
EBUG_ON(bkey_cmp(where, k.k->p) > 0);
sub = where.offset - bkey_start_offset(k.k);
k.k->type = KEY_TYPE_deleted;
case KEY_TYPE_deleted:
case KEY_TYPE_discard:
case KEY_TYPE_cookie:
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v: {
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry;
bool seen_crc = false;
bkey_extent_entry_for_each(ptrs, entry) {
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
entry->ptr.offset += sub;
case BCH_EXTENT_ENTRY_crc32:
entry->crc32.offset += sub;
case BCH_EXTENT_ENTRY_crc64:
entry->crc64.offset += sub;
case BCH_EXTENT_ENTRY_crc128:
entry->crc128.offset += sub;
case BCH_EXTENT_ENTRY_stripe_ptr:
if (extent_entry_is_crc(entry))
case KEY_TYPE_reflink_p: {
struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
le64_add_cpu(&p.v->idx, sub);
case KEY_TYPE_reservation:
bool bch2_cut_back(struct bpos where, struct bkey *k)
if (bkey_cmp(where, k->p) >= 0)
EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
len = where.offset - bkey_start_offset(k);
k->type = KEY_TYPE_deleted;
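/*
* Worked example (editor's note): for a key covering [100, 200),
* __bch2_cut_front(POS(inode, 150), k) drops the first 50 sectors and bumps
* the pointer/crc offsets by 50 so the value still describes the remaining
* data, leaving [150, 200); bch2_cut_back(POS(inode, 150), k) instead
* shrinks the size so the key covers [100, 150).
*/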
static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
struct bkey_format *f = &b->format;
struct bkey_i *dst_unpacked;
struct bkey_packed tmp;
if ((dst_unpacked = packed_to_bkey(dst)))
dst_unpacked->k = src->k;
else if (bch2_bkey_pack_key(&tmp, &src->k, f))
memcpy_u64s(dst, &tmp, f->key_u64s);
memcpy_u64s(bkeyp_val(f, dst), &src->v, bkey_val_u64s(&src->k));
static bool bch2_extent_merge_inline(struct bch_fs *,
struct bkey_packed *,
struct bkey_packed *,
static void verify_extent_nonoverlapping(struct bch_fs *c,
struct btree_node_iter *_iter,
struct bkey_i *insert)
#ifdef CONFIG_BCACHEFS_DEBUG
struct btree_node_iter iter;
struct bkey_packed *k;
if (!expensive_debug_checks(c))
k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
(uk = bkey_unpack_key(b, k),
bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));
k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_discard);
(uk = bkey_unpack_key(b, k),
bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
(uk = bkey_unpack_key(b, k),
bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
bch2_bkey_to_text(&PBUF(buf1), &insert->k);
bch2_bkey_to_text(&PBUF(buf2), &uk);
bch2_dump_btree_node(b);
panic("insert > next :\n"
static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
struct bkey_i *insert)
struct btree_iter_level *l = &iter->l[0];
struct btree_node_iter node_iter;
struct bkey_packed *k;
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
verify_extent_nonoverlapping(c, l->b, &l->iter, insert);
if (debug_check_bkeys(c))
bch2_bkey_debugcheck(c, l->b, bkey_i_to_s_c(insert));
k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_discard);
if (k && !bkey_written(l->b, k) &&
bch2_extent_merge_inline(c, iter, k, bkey_to_packed(insert), true))
k = bch2_btree_node_iter_peek_filter(&node_iter, l->b, KEY_TYPE_discard);
if (k && !bkey_written(l->b, k) &&
bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
* We may have skipped past some deleted extents greater than the insert
* key, before we got to a non-deleted extent and knew we could bail out;
* rewind the iterator a bit if necessary:
while ((k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
bkey_cmp_left_packed(l->b, k, &insert->k.p) > 0)
k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
bch2_bset_insert(l->b, &l->iter, k, insert, 0);
bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
bkey_extent_entry_for_each(ptrs, entry) {
switch (__extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
case BCH_EXTENT_ENTRY_stripe_ptr:
static int count_iters_for_insert(struct btree_trans *trans,
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v:
*nr_iters += bch2_bkey_nr_alloc_ptrs(k);
if (*nr_iters >= max_iters) {
*end = bpos_min(*end, k.k->p);
case KEY_TYPE_reflink_p: {
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
u64 idx = le64_to_cpu(p.v->idx);
unsigned sectors = end->offset - bkey_start_offset(p.k);
struct btree_iter *iter;
for_each_btree_key(trans, iter,
BTREE_ID_REFLINK, POS(0, idx + offset),
BTREE_ITER_SLOTS, r_k, ret) {
if (bkey_cmp(bkey_start_pos(r_k.k),
POS(0, idx + sectors)) >= 0)
r_k.k->type == KEY_TYPE_reflink_v) {
struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(r_k);
if (le64_to_cpu(r.v->refcount) == 1)
*nr_iters += bch2_bkey_nr_alloc_ptrs(r_k);
* if we're going to be deleting an entry from
* the reflink btree, need more iters...
if (*nr_iters >= max_iters) {
struct bpos pos = bkey_start_pos(k.k);
pos.offset += r_k.k->p.offset - idx;
*end = bpos_min(*end, pos);
bch2_trans_iter_put(trans, iter);
#define EXTENT_ITERS_MAX (BTREE_ITER_MAX / 3)
int bch2_extent_atomic_end(struct btree_iter *iter,
struct bkey_i *insert,
struct btree_trans *trans = iter->trans;
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
unsigned nr_iters = 0;
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
*end = bpos_min(insert->k.p, b->key.k.p);
ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
&nr_iters, EXTENT_ITERS_MAX / 2, false);
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_discard))) {
struct bkey unpacked;
struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
unsigned offset = 0;
if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
if (bkey_cmp(bkey_start_pos(&insert->k),
bkey_start_pos(k.k)) > 0)
offset = bkey_start_offset(&insert->k) -
bkey_start_offset(k.k);
ret = count_iters_for_insert(trans, k, offset, end,
&nr_iters, EXTENT_ITERS_MAX, true);
bch2_btree_node_iter_advance(&node_iter, b);
return ret < 0 ? ret : 0;
int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
ret = bch2_extent_atomic_end(iter, k, &end);
bch2_cut_back(end, &k->k);
int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
ret = bch2_extent_atomic_end(iter, k, &end);
return !bkey_cmp(end, k->k.p);
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *trans,
struct btree_insert_entry *insert,
struct btree_iter_level *l = &insert->iter->l[0];
struct btree_node_iter node_iter = l->iter;
enum bch_extent_overlap overlap;
struct bkey_packed *_k;
struct bkey unpacked;
* We avoid creating whiteouts whenever possible when deleting, but
* those optimizations mean we may potentially insert two whiteouts
* instead of one (when we overlap with the front of one extent and the
if (bkey_whiteout(&insert->k->k))
_k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
return BTREE_INSERT_OK;
k = bkey_disassemble(l->b, _k, &unpacked);
overlap = bch2_extent_overlap(&insert->k->k, k.k);
/* account for having to split existing extent: */
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
(sectors = bch2_extent_is_compressed(k))) {
int flags = trans->flags & BTREE_INSERT_NOFAIL
? BCH_DISK_RESERVATION_NOFAIL : 0;
switch (bch2_disk_reservation_add(trans->c,
return BTREE_INSERT_ENOSPC;
return BTREE_INSERT_OK;
extent_squash(struct bch_fs *c, struct btree_iter *iter,
struct bkey_i *insert,
struct bkey_packed *_k, struct bkey_s k,
enum bch_extent_overlap overlap)
struct btree_iter_level *l = &iter->l[0];
case BCH_EXTENT_OVERLAP_FRONT:
/* insert overlaps with start of k: */
__bch2_cut_front(insert->k.p, k);
EBUG_ON(bkey_deleted(k.k));
extent_save(l->b, _k, k.k);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
case BCH_EXTENT_OVERLAP_BACK:
/* insert overlaps with end of k: */
bch2_cut_back(bkey_start_pos(&insert->k), k.k);
EBUG_ON(bkey_deleted(k.k));
extent_save(l->b, _k, k.k);
* As the auxiliary tree is indexed by the end of the
* key and we've just changed the end, update the
bch2_bset_fix_invalidated_key(l->b, _k);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
case BCH_EXTENT_OVERLAP_ALL: {
/* The insert key completely covers k, invalidate k */
if (!bkey_whiteout(k.k))
btree_account_key_drop(l->b, _k);
k.k->type = KEY_TYPE_deleted;
if (_k >= btree_bset_last(l->b)->start) {
unsigned u64s = _k->u64s;
bch2_bset_delete(l->b, _k, _k->u64s);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
extent_save(l->b, _k, k.k);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
case BCH_EXTENT_OVERLAP_MIDDLE: {
BKEY_PADDED(k) split;
* The insert key falls 'in the middle' of k
* The insert key splits k in 3:
* - start only in k, preserve
* - middle common section, invalidate in k
* - end only in k, preserve
* We update the old key in place so that it preserves the end,
* insert will be the new common section,
* and we manually re-insert the start that we are preserving.
* modify k _before_ doing the insert (which will move
bkey_reassemble(&split.k, k.s_c);
split.k.k.needs_whiteout |= bkey_written(l->b, _k);
bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
__bch2_cut_front(insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
extent_save(l->b, _k, k.k);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
extent_bset_insert(c, iter, &split.k);
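/*
* Worked example (editor's note): if k covers [0, 100) and insert covers
* [20, 60), the MIDDLE case copies k into split and cuts split back to
* [0, 20), cuts the front off the in-node k so it covers [60, 100), and then
* inserts split, leaving three keys: [0, 20), the insert [20, 60), and
* [60, 100).
*/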
struct extent_insert_state {
struct bkey_i whiteout;
bool update_journal;
static void __bch2_insert_fixup_extent(struct bch_fs *c,
struct btree_iter *iter,
struct bkey_i *insert,
struct extent_insert_state *s)
struct btree_iter_level *l = &iter->l[0];
struct bkey_packed *_k;
struct bkey unpacked;
while ((_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
KEY_TYPE_discard))) {
struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
struct bpos cur_end = bpos_min(insert->k.p, k.k->p);
enum bch_extent_overlap overlap =
bch2_extent_overlap(&insert->k, k.k);
if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
if (!bkey_whiteout(k.k))
s->update_journal = true;
if (!s->update_journal) {
bch2_cut_front(cur_end, insert);
bch2_cut_front(cur_end, &s->whiteout);
bch2_btree_iter_set_pos_same_leaf(iter, cur_end);
* When deleting, if possible just do it by switching the type
* of the key we're deleting, instead of creating and inserting
!bkey_cmp(insert->k.p, k.k->p) &&
!bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
if (!bkey_whiteout(k.k)) {
btree_account_key_drop(l->b, _k);
_k->type = KEY_TYPE_discard;
reserve_whiteout(l->b, _k);
bch2_btree_node_iter_fix(iter, l->b, &l->iter,
_k, _k->u64s, _k->u64s);
if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
insert->k.needs_whiteout = true;
s->update_btree = true;
if (s->update_btree &&
overlap == BCH_EXTENT_OVERLAP_ALL &&
bkey_whiteout(k.k) &&
k.k->needs_whiteout) {
unreserve_whiteout(l->b, _k);
_k->needs_whiteout = false;
extent_squash(c, iter, insert, _k, k, overlap);
if (!s->update_btree)
bch2_cut_front(cur_end, insert);
if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
overlap == BCH_EXTENT_OVERLAP_MIDDLE)
/**
* bch_extent_insert_fixup - insert a new extent and deal with overlaps
*
* This may result in not actually doing the insert, or inserting some subset
* of the insert key. For cmpxchg operations this is where that logic lives.
*
* All subsets of @insert that need to be inserted are inserted using
* bch2_btree_insert_and_journal(). If @b or @res fills up, this function
* returns false, setting @iter->pos for the prefix of @insert that actually got
* inserted.
*
* BSET INVARIANTS: this function is responsible for maintaining all the
* invariants for bsets of extents in memory. Things get really hairy with 0
* size extents.
*
* Within one bset:
*
* bkey_start_pos(bkey_next(k)) >= k
* or bkey_start_offset(bkey_next(k)) >= k->offset
*
* i.e. strict ordering, no overlapping extents.
*
* Across multiple bsets (i.e. the full btree node), for any two keys k, j:
*
* k.size != 0 ∧ j.size != 0 →
* ¬ (k > bkey_start_pos(j) ∧ k < j)
*
* i.e. no two overlapping keys _of nonzero size_.
*
* We can't realistically maintain this invariant for zero size keys because of
* the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
* there may be another 0 size key between them in another bset, and it will
* thus overlap with the merged key.
*
* In addition, the end of iter->pos indicates how much has been processed.
* If the end of iter->pos is not the same as the end of insert, then
* key insertion needs to continue/be retried.
*/
void bch2_insert_fixup_extent(struct btree_trans *trans,
struct btree_insert_entry *insert)
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
struct extent_insert_state s = {
.whiteout = *insert->k,
.update_journal = !bkey_whiteout(&insert->k->k),
.update_btree = !bkey_whiteout(&insert->k->k),
.deleting = bkey_whiteout(&insert->k->k),
EBUG_ON(iter->level);
EBUG_ON(!insert->k->k.size);
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
__bch2_insert_fixup_extent(c, iter, insert->k, &s);
bch2_btree_iter_set_pos_same_leaf(iter, insert->k->k.p);
if (s.update_btree) {
bkey_copy(&tmp.k, insert->k);
tmp.k.k.type = KEY_TYPE_discard;
EBUG_ON(bkey_deleted(&tmp.k.k) || !tmp.k.k.size);
extent_bset_insert(c, iter, &tmp.k);
if (s.update_journal) {
bkey_copy(&tmp.k, !s.deleting ? insert->k : &s.whiteout);
tmp.k.k.type = KEY_TYPE_discard;
EBUG_ON(bkey_deleted(&tmp.k.k) || !tmp.k.k.size);
bch2_btree_journal_key(trans, iter, &tmp.k);
bch2_cut_front(insert->k->k.p, insert->k);
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
return bch2_bkey_ptrs_invalid(c, k);
void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
* XXX: we should be doing most/all of these checks at startup time,
* where we check bch2_bkey_invalid() in btree_node_read_done()
* But note that we can't check for stale pointers or incorrect gc marks
* until after journal replay is done (it might be an extent that's
* going to get overwritten during replay)
if (percpu_down_read_trylock(&c->mark_lock)) {
bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
!bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
"extent key bad (replicas not marked in superblock):\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
percpu_up_read(&c->mark_lock);
* If journal replay hasn't finished, we might be seeing keys
* that will be overwritten by the time journal replay is done:
if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
extent_for_each_ptr_decode(e, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
unsigned stale = gen_after(mark.gen, p.ptr.gen);
unsigned disk_sectors = ptr_disk_sectors(p);
unsigned mark_sectors = p.ptr.cached
? mark.cached_sectors
: mark.dirty_sectors;
bch2_fs_bug_on(stale && !p.ptr.cached, c,
1434 "stale dirty pointer (ptr gen %u bucket %u",
p.ptr.gen, mark.gen);
bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
bch2_fs_bug_on(!stale &&
(mark.data_type != BCH_DATA_USER ||
mark_sectors < disk_sectors), c,
"extent pointer not marked: %s:\n"
"type %u sectors %u < %u",
(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
mark_sectors, disk_sectors);
void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
bch2_bkey_ptrs_to_text(out, c, k);
static unsigned bch2_crc_field_size_max[] = {
[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
struct bch_extent_crc_unpacked src)
#define set_common_fields(_dst, _src) \
_dst.csum_type = _src.csum_type, \
_dst.compression_type = _src.compression_type, \
_dst._compressed_size = _src.compressed_size - 1, \
_dst._uncompressed_size = _src.uncompressed_size - 1, \
_dst.offset = _src.offset
switch (extent_entry_type(to_entry(dst))) {
case BCH_EXTENT_ENTRY_crc32:
set_common_fields(dst->crc32, src);
dst->crc32.csum = *((__le32 *) &src.csum.lo);
case BCH_EXTENT_ENTRY_crc64:
set_common_fields(dst->crc64, src);
dst->crc64.nonce = src.nonce;
dst->crc64.csum_lo = src.csum.lo;
dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
case BCH_EXTENT_ENTRY_crc128:
set_common_fields(dst->crc128, src);
dst->crc128.nonce = src.nonce;
dst->crc128.csum = src.csum;
#undef set_common_fields
static void bch2_extent_crc_append(struct bkey_i *k,
struct bch_extent_crc_unpacked new)
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
union bch_extent_crc *crc = (void *) ptrs.end;
if (bch_crc_bytes[new.csum_type] <= 4 &&
new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
new.nonce <= CRC32_NONCE_MAX)
crc->type = 1 << BCH_EXTENT_ENTRY_crc32;
else if (bch_crc_bytes[new.csum_type] <= 10 &&
new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
new.nonce <= CRC64_NONCE_MAX)
crc->type = 1 << BCH_EXTENT_ENTRY_crc64;
else if (bch_crc_bytes[new.csum_type] <= 16 &&
new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
new.nonce <= CRC128_NONCE_MAX)
crc->type = 1 << BCH_EXTENT_ENTRY_crc128;
bch2_extent_crc_pack(crc, new);
k->k.u64s += extent_entry_u64s(ptrs.end);
EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
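/*
* Editor's note (sketch of the selection above): the smallest entry that can
* hold the checksum is chosen. A checksum type with bch_crc_bytes[] <= 4 and
* a size/nonce within the crc32 packed-field limits gets a crc32 entry;
* wider checksums or larger nonces fall through to crc64 and then crc128,
* with the size limits mirrored in bch2_crc_field_size_max[] above.
*/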
static inline void __extent_entry_insert(struct bkey_i *k,
union bch_extent_entry *dst,
union bch_extent_entry *new)
union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
dst, (u64 *) end - (u64 *) dst);
k->k.u64s += extent_entry_u64s(new);
memcpy(dst, new, extent_entry_bytes(new));
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
struct extent_ptr_decoded *p)
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
struct bch_extent_crc_unpacked crc =
bch2_extent_crc_unpack(&k->k, NULL);
union bch_extent_entry *pos;
if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
bkey_for_each_crc(&k->k, ptrs, crc, pos)
if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
pos = extent_entry_next(pos);
bch2_extent_crc_append(k, p->crc);
pos = bkey_val_end(bkey_i_to_s(k));
p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
__extent_entry_insert(k, pos, to_entry(&p->ptr));
for (i = 0; i < p->ec_nr; i++) {
p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
__extent_entry_insert(k, pos, to_entry(&p->ec[i]));
* bch_extent_normalize - clean up an extent, dropping stale pointers etc.
* Returns true if @k should be dropped entirely
* For existing keys, only called when btree nodes are being rewritten, not when
* they're merely being compacted/resorted in memory.
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
struct bch_extent_ptr *ptr;
bch2_bkey_drop_ptrs(k, ptr,
ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
/* will only happen if all pointers were cached: */
if (!bkey_val_u64s(k.k))
k.k->type = KEY_TYPE_discard;
return bkey_whiteout(k.k);
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
unsigned nr_desired_replicas)
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry;
struct extent_ptr_decoded p;
int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;
if (target && extra > 0)
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
int n = bch2_extent_ptr_durability(c, p);
if (n && n <= extra &&
!bch2_dev_in_target(c, p.ptr.dev, target)) {
entry->ptr.cached = true;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
int n = bch2_extent_ptr_durability(c, p);
if (n && n <= extra) {
entry->ptr.cached = true;
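/*
* Worked example (editor's note): with nr_desired_replicas == 2 and a key
* whose durability is currently 3, extra == 1, so one pointer with
* per-pointer durability 1 gets marked cached. When a target is given, the
* first loop considers only pointers outside the target; the second loop
* then falls back to any remaining pointer.
*/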
enum merge_result bch2_extent_merge(struct bch_fs *c,
struct bkey_s _l, struct bkey_s _r)
struct bkey_s_extent l = bkey_s_to_extent(_l);
struct bkey_s_extent r = bkey_s_to_extent(_r);
union bch_extent_entry *en_l = l.v->start;
union bch_extent_entry *en_r = r.v->start;
struct bch_extent_crc_unpacked crc_l, crc_r;
if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
return BCH_MERGE_NOMERGE;
crc_l = bch2_extent_crc_unpack(l.k, NULL);
extent_for_each_entry(l, en_l) {
en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
if (extent_entry_type(en_l) != extent_entry_type(en_r))
return BCH_MERGE_NOMERGE;
switch (extent_entry_type(en_l)) {
case BCH_EXTENT_ENTRY_ptr: {
const struct bch_extent_ptr *lp = &en_l->ptr;
const struct bch_extent_ptr *rp = &en_r->ptr;
if (lp->offset + crc_l.compressed_size != rp->offset ||
lp->dev != rp->dev ||
return BCH_MERGE_NOMERGE;
/* We don't allow extents to straddle buckets: */
ca = bch_dev_bkey_exists(c, lp->dev);
if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
return BCH_MERGE_NOMERGE;
case BCH_EXTENT_ENTRY_stripe_ptr:
if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
return BCH_MERGE_NOMERGE;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
if (crc_l.csum_type != crc_r.csum_type ||
crc_l.compression_type != crc_r.compression_type ||
crc_l.nonce != crc_r.nonce)
return BCH_MERGE_NOMERGE;
if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
return BCH_MERGE_NOMERGE;
if (!bch2_checksum_mergeable(crc_l.csum_type))
return BCH_MERGE_NOMERGE;
if (crc_l.compression_type)
return BCH_MERGE_NOMERGE;
if (crc_l.csum_type &&
crc_l.uncompressed_size +
crc_r.uncompressed_size > c->sb.encoded_extent_max)
return BCH_MERGE_NOMERGE;
if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
bch2_crc_field_size_max[extent_entry_type(en_l)])
return BCH_MERGE_NOMERGE;
return BCH_MERGE_NOMERGE;
extent_for_each_entry(l, en_l) {
struct bch_extent_crc_unpacked crc_l, crc_r;
en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
if (!extent_entry_is_crc(en_l))
crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
crc_r.uncompressed_size << 9);
crc_l.uncompressed_size += crc_r.uncompressed_size;
crc_l.compressed_size += crc_r.compressed_size;
bch2_extent_crc_pack(entry_to_crc(en_l), crc_l);
bch2_key_resize(l.k, l.k->size + r.k->size);
return BCH_MERGE_MERGE;
/*
* When merging an extent that we're inserting into a btree node, the new merged
* extent could overlap with an existing 0 size extent - if we don't fix that,
* it'll break the btree node iterator, so this code finds those 0 size extents
* and shifts them out of the way.
*
* Also unpacks and repacks.
*/
static bool bch2_extent_merge_inline(struct bch_fs *c,
struct btree_iter *iter,
struct bkey_packed *l,
struct bkey_packed *r,
struct btree *b = iter->l[0].b;
struct btree_node_iter *node_iter = &iter->l[0].iter;
BKEY_PADDED(k) li, ri;
struct bkey_packed *m = back_merge ? l : r;
struct bkey_i *mi = back_merge ? &li.k : &ri.k;
struct bset_tree *t = bch2_bkey_to_bset(b, m);
enum merge_result ret;
EBUG_ON(bkey_written(b, m));
if (bkey_val_u64s(l) > BKEY_EXTENT_VAL_U64s_MAX ||
bkey_val_u64s(r) > BKEY_EXTENT_VAL_U64s_MAX)
return BCH_MERGE_NOMERGE;
* We need to save copies of both l and r, because we might get a
* partial merge (which modifies both) and then fails to repack
bch2_bkey_unpack(b, &li.k, l);
bch2_bkey_unpack(b, &ri.k, r);
ret = bch2_bkey_merge(c,
bkey_i_to_s(&ri.k));
if (ret == BCH_MERGE_NOMERGE)
if (debug_check_bkeys(c))
bch2_bkey_debugcheck(c, b, bkey_i_to_s_c(&li.k));
if (debug_check_bkeys(c) &&
ret == BCH_MERGE_PARTIAL)
bch2_bkey_debugcheck(c, b, bkey_i_to_s_c(&ri.k));
* check if we overlap with deleted extents - would break the sort
struct bkey_packed *n = bkey_next(m);
if (n != btree_bkey_last(b, t) &&
bkey_cmp_left_packed(b, n, &li.k.k.p) <= 0 &&
} else if (ret == BCH_MERGE_MERGE) {
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
bkey_cmp_left_packed_byval(b, prev,
bkey_start_pos(&li.k.k)) > 0)
if (ret == BCH_MERGE_PARTIAL) {
if (!extent_i_save(b, m, mi))
bkey_copy(packed_to_bkey(l), &li.k);
bkey_copy(packed_to_bkey(r), &ri.k);
if (!extent_i_save(b, m, &li.k))
bch2_bset_fix_invalidated_key(b, m);
bch2_btree_node_iter_fix(iter, b, node_iter,
m, m->u64s, m->u64s);
return ret == BCH_MERGE_MERGE;
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
unsigned nr_replicas)
struct btree_trans trans;
struct btree_iter *iter;
struct bpos end = pos;
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
if (nr_replicas > bch2_bkey_nr_ptrs_allocated(k)) {
bch2_trans_exit(&trans);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
switch (k.k->type) {
case KEY_TYPE_extent: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
extent_for_each_ptr_decode(e, p, entry)
ret += !p.ptr.cached &&
p.crc.compression_type == BCH_COMPRESSION_NONE;
case KEY_TYPE_reservation:
ret = bkey_s_c_to_reservation(k).v->nr_replicas;
/* KEY_TYPE_reservation: */
const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
return "incorrect value size";
if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
return "invalid nr_replicas";
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
pr_buf(out, "generation %u replicas %u",
le32_to_cpu(r.v->generation),
enum merge_result bch2_reservation_merge(struct bch_fs *c,
struct bkey_s _l, struct bkey_s _r)
struct bkey_s_reservation l = bkey_s_to_reservation(_l);
struct bkey_s_reservation r = bkey_s_to_reservation(_r);
if (l.v->generation != r.v->generation ||
l.v->nr_replicas != r.v->nr_replicas)
return BCH_MERGE_NOMERGE;
if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
bch2_key_resize(l.k, KEY_SIZE_MAX);
__bch2_cut_front(l.k->p, r.s);
return BCH_MERGE_PARTIAL;
bch2_key_resize(l.k, l.k->size + r.k->size);
1909 return BCH_MERGE_MERGE;