2 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 * Code for managing the extent btree and dynamically updating the writeback dirty sector count.
9 #include "bkey_methods.h"
11 #include "btree_update.h"
20 #include "writeback.h"
23 #include <trace/events/bcache.h>
25 static enum merge_result bch_extent_merge(struct cache_set *, struct btree *,
26 struct bkey_i *, struct bkey_i *);
28 static void sort_key_next(struct btree_node_iter *iter,
30 struct btree_node_iter_set *i)
32 i->k += __btree_node_offset_to_key(b, i->k)->u64s;
35 *i = iter->data[--iter->used];
39 * Returns true if l > r - unless l == r, in which case returns true if l is
42 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
43 * equal in different sets, we have to process them newest to oldest.
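*
* Illustrative example (added here, not from the original source): if the
* same key is present both in an older bset and again in a newer bset, the
* two copies compare equal here; the tie-break on the in-node offset gives
* them a deterministic order, which is what lets should_drop_next_key()
* below recognize the duplicate and keep only one copy.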
45 #define key_sort_cmp(l, r) \
47 int _c = bkey_cmp_packed(b, \
48 __btree_node_offset_to_key(b, (l).k), \
49 __btree_node_offset_to_key(b, (r).k)); \
51 _c ? _c > 0 : (l).k > (r).k; \
54 static inline bool should_drop_next_key(struct btree_node_iter *iter,
57 struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
58 struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
67 key_sort_cmp(r[0], r[1]))
71 * key_sort_cmp() ensures that when keys compare equal the older key
72 * comes first; so if l->k compares equal to r->k then l->k is older and
75 return !bkey_cmp_packed(b,
76 __btree_node_offset_to_key(b, l->k),
77 __btree_node_offset_to_key(b, r->k));
80 struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst,
82 struct btree_node_iter *iter)
84 struct bkey_packed *out = dst->start;
85 struct btree_nr_keys nr;
87 memset(&nr, 0, sizeof(nr));
89 heap_resort(iter, key_sort_cmp);
91 while (!bch_btree_node_iter_end(iter)) {
92 if (!should_drop_next_key(iter, b)) {
93 struct bkey_packed *k =
94 __btree_node_offset_to_key(b, iter->data->k);
97 btree_keys_account_key_add(&nr, 0, out);
101 sort_key_next(iter, b, iter->data);
102 heap_sift(iter, 0, key_sort_cmp);
105 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
109 /* Common among btree and extent ptrs */
111 const struct bch_extent_ptr *
112 bch_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
114 const struct bch_extent_ptr *ptr;
116 extent_for_each_ptr(e, ptr)
123 unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent e)
125 const struct bch_extent_ptr *ptr;
126 unsigned nr_ptrs = 0;
128 extent_for_each_ptr(e, ptr)
134 unsigned bch_extent_nr_dirty_ptrs(struct bkey_s_c k)
136 struct bkey_s_c_extent e;
137 const struct bch_extent_ptr *ptr;
138 unsigned nr_ptrs = 0;
142 case BCH_EXTENT_CACHED:
143 e = bkey_s_c_to_extent(k);
145 extent_for_each_ptr(e, ptr)
146 nr_ptrs += !ptr->cached;
149 case BCH_RESERVATION:
150 nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
157 /* returns true if equal */
158 static bool crc_cmp(union bch_extent_crc *l, union bch_extent_crc *r)
160 return extent_crc_type(l) == extent_crc_type(r) &&
161 !memcmp(l, r, extent_entry_bytes(to_entry(l)));
164 /* Increment pointers after @crc by crc's offset until the next crc entry: */
165 void bch_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc)
167 union bch_extent_entry *entry;
169 extent_for_each_entry_from(e, entry, extent_entry_next(to_entry(crc))) {
170 if (!extent_entry_is_ptr(entry))
173 entry->ptr.offset += crc_offset(crc);
178 * We're writing another replica for this extent, so while we've got the data in
179 * memory we'll be computing a new checksum for the currently live data.
181 * If there are other replicas we aren't moving, and they are checksummed but
182 * not compressed, we can modify them to point to only the data that is
183 * currently live (so that readers won't have to bounce) while we've got the
186 * XXX: to guard against data being corrupted while in memory, instead of
187 * recomputing the checksum here, it would be better in the read path, instead
188 * of computing the checksum of the entire extent, to:
192 * compute the checksums of the live and dead data separately
193 * | dead data || live data || dead data |
195 * and then verify that crc_dead1 + crc_live + crc_dead2 == orig_crc, and then
196 * use crc_live here (that we verified was correct earlier)
198 * note: doesn't work with encryption
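*
* Worked example (illustrative, added here - the numbers are made up): say
* e.k->size is 8 sectors, but one crc entry still covers 32 sectors at
* offset 12, i.e. its checksum spans data that is no longer live. After
* narrowing, that entry covers just the 8 live sectors at offset 0, its
* csum/csum_type are replaced by the checksum that was just computed over
* the live data for the new replica, and the pointers following it have had
* 12 sectors added to their offsets (bch_extent_crc_narrow_pointers()) so
* they point directly at the first live sector on disk.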
200 void bch_extent_narrow_crcs(struct bkey_s_extent e)
202 union bch_extent_crc *crc;
203 bool have_wide = false, have_narrow = false;
204 struct bch_csum csum = { 0 };
205 unsigned csum_type = 0;
207 extent_for_each_crc(e, crc) {
208 if (crc_compression_type(crc) ||
209 bch_csum_type_is_encryption(crc_csum_type(crc)))
212 if (crc_uncompressed_size(e.k, crc) != e.k->size) {
216 csum = crc_csum(crc);
217 csum_type = crc_csum_type(crc);
221 if (!have_wide || !have_narrow)
224 extent_for_each_crc(e, crc) {
225 if (crc_compression_type(crc))
228 if (crc_uncompressed_size(e.k, crc) != e.k->size) {
229 switch (extent_crc_type(crc)) {
230 case BCH_EXTENT_CRC_NONE:
232 case BCH_EXTENT_CRC32:
233 if (bch_crc_bytes[csum_type] > 4)
236 bch_extent_crc_narrow_pointers(e, crc);
237 crc->crc32._compressed_size = e.k->size - 1;
238 crc->crc32._uncompressed_size = e.k->size - 1;
239 crc->crc32.offset = 0;
240 crc->crc32.csum_type = csum_type;
241 crc->crc32.csum = csum.lo;
243 case BCH_EXTENT_CRC64:
244 if (bch_crc_bytes[csum_type] > 10)
247 bch_extent_crc_narrow_pointers(e, crc);
248 crc->crc64._compressed_size = e.k->size - 1;
249 crc->crc64._uncompressed_size = e.k->size - 1;
250 crc->crc64.offset = 0;
251 crc->crc64.csum_type = csum_type;
252 crc->crc64.csum_lo = csum.lo;
253 crc->crc64.csum_hi = csum.hi;
255 case BCH_EXTENT_CRC128:
256 if (bch_crc_bytes[csum_type] > 16)
259 bch_extent_crc_narrow_pointers(e, crc);
260 crc->crc128._compressed_size = e.k->size - 1;
261 crc->crc128._uncompressed_size = e.k->size - 1;
262 crc->crc128.offset = 0;
263 crc->crc128.csum_type = csum_type;
264 crc->crc128.csum = csum;
271 void bch_extent_drop_redundant_crcs(struct bkey_s_extent e)
273 union bch_extent_entry *entry = e.v->start;
274 union bch_extent_crc *crc, *prev = NULL;
276 while (entry != extent_entry_last(e)) {
277 union bch_extent_entry *next = extent_entry_next(entry);
278 size_t crc_u64s = extent_entry_u64s(entry);
280 if (!extent_entry_is_crc(entry))
283 crc = entry_to_crc(entry);
285 if (next == extent_entry_last(e)) {
286 /* crc entry with no pointers after it: */
290 if (extent_entry_is_crc(next)) {
291 /* no pointers before next crc entry: */
295 if (prev && crc_cmp(crc, prev)) {
296 /* identical to previous crc entry: */
301 !crc_csum_type(crc) &&
302 !crc_compression_type(crc)) {
303 /* null crc entry: */
304 bch_extent_crc_narrow_pointers(e, crc);
313 memmove_u64s_down(crc, next,
314 (u64 *) extent_entry_last(e) - (u64 *) next);
315 e.k->u64s -= crc_u64s;
318 EBUG_ON(bkey_val_u64s(e.k) && !bch_extent_nr_ptrs(e.c));
321 static bool should_drop_ptr(const struct cache_set *c,
322 struct bkey_s_c_extent e,
323 const struct bch_extent_ptr *ptr)
327 return (ca = PTR_CACHE(c, ptr)) && ptr_stale(ca, ptr);
330 static void bch_extent_drop_stale(struct cache_set *c, struct bkey_s_extent e)
332 struct bch_extent_ptr *ptr = &e.v->start->ptr;
333 bool dropped = false;
336 while ((ptr = extent_ptr_next(e, ptr)))
337 if (should_drop_ptr(c, e.c, ptr)) {
338 __bch_extent_drop_ptr(e, ptr);
345 bch_extent_drop_redundant_crcs(e);
348 static bool bch_ptr_normalize(struct cache_set *c, struct btree *bk,
351 return bch_extent_normalize(c, k);
354 static void bch_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
358 case BCH_EXTENT_CACHED: {
359 union bch_extent_entry *entry;
360 u64 *d = (u64 *) bkeyp_val(f, k);
363 for (i = 0; i < bkeyp_val_u64s(f, k); i++)
366 for (entry = (union bch_extent_entry *) d;
367 entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
368 entry = extent_entry_next(entry)) {
369 switch (extent_entry_type(entry)) {
370 case BCH_EXTENT_ENTRY_crc32:
371 entry->crc32.csum = swab32(entry->crc32.csum);
373 case BCH_EXTENT_ENTRY_crc64:
374 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
375 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
377 case BCH_EXTENT_ENTRY_crc128:
378 entry->crc128.csum.hi = swab64(entry->crc128.csum.hi);
379 entry->crc128.csum.lo = swab64(entry->crc128.csum.lo);
381 case BCH_EXTENT_ENTRY_ptr:
390 static const char *extent_ptr_invalid(struct bkey_s_c_extent e,
391 const struct cache_member_rcu *mi,
392 const struct bch_extent_ptr *ptr,
393 unsigned size_ondisk)
395 const struct bch_extent_ptr *ptr2;
396 const struct cache_member_cpu *m = mi->m + ptr->dev;
398 if (ptr->dev >= mi->nr_devices || !m->valid)
399 return "pointer to invalid device";
401 extent_for_each_ptr(e, ptr2)
402 if (ptr != ptr2 && ptr->dev == ptr2->dev)
403 return "multiple pointers to same device";
405 if (ptr->offset + size_ondisk > m->bucket_size * m->nbuckets)
406 return "offset past end of device";
408 if (ptr->offset < m->bucket_size * m->first_bucket)
409 return "offset before first bucket";
411 if ((ptr->offset & (m->bucket_size - 1)) + size_ondisk > m->bucket_size)
412 return "spans multiple buckets";
417 static size_t extent_print_ptrs(struct cache_set *c, char *buf,
418 size_t size, struct bkey_s_c_extent e)
420 char *out = buf, *end = buf + size;
421 const union bch_extent_entry *entry;
422 const union bch_extent_crc *crc;
423 const struct bch_extent_ptr *ptr;
427 #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
430 extent_for_each_entry(e, entry) {
434 switch (__extent_entry_type(entry)) {
435 case BCH_EXTENT_ENTRY_crc32:
436 case BCH_EXTENT_ENTRY_crc64:
437 case BCH_EXTENT_ENTRY_crc128:
438 crc = entry_to_crc(entry);
440 p("crc: c_size %u size %u offset %u csum %u compress %u",
441 crc_compressed_size(e.k, crc),
442 crc_uncompressed_size(e.k, crc),
443 crc_offset(crc), crc_csum_type(crc),
444 crc_compression_type(crc));
446 case BCH_EXTENT_ENTRY_ptr:
447 ptr = entry_to_ptr(entry);
449 p("ptr: %u:%llu gen %u%s", ptr->dev,
450 (u64) ptr->offset, ptr->gen,
451 (ca = PTR_CACHE(c, ptr)) && ptr_stale(ca, ptr)
455 p("(invalid extent entry %.16llx)", *((u64 *) entry));
464 if (bkey_extent_is_cached(e.k))
472 static const char *bch_btree_ptr_invalid(const struct cache_set *c,
475 if (bkey_extent_is_cached(k.k))
479 return "nonzero key size";
481 if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
482 return "value too big";
486 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
487 const union bch_extent_entry *entry;
488 const struct bch_extent_ptr *ptr;
489 const union bch_extent_crc *crc;
490 struct cache_member_rcu *mi;
493 extent_for_each_entry(e, entry)
494 if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
495 return "invalid extent entry type";
497 mi = cache_member_info_get(c);
499 extent_for_each_ptr_crc(e, ptr, crc) {
500 reason = extent_ptr_invalid(e, mi, ptr,
501 c->sb.btree_node_size);
504 cache_member_info_put();
509 cache_member_info_put();
512 return "has crc field";
518 return "invalid value type";
522 static void btree_ptr_debugcheck(struct cache_set *c, struct btree *b,
525 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
526 const struct bch_extent_ptr *ptr;
532 unsigned replicas = 0;
537 extent_for_each_online_device(c, e, ptr, ca) {
540 if ((ca = PTR_CACHE(c, ptr))) {
541 g = PTR_BUCKET(ca, ptr);
544 if (ptr_stale(ca, ptr))
548 seq = read_seqcount_begin(&c->gc_pos_lock);
549 bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
550 !g->mark.is_metadata;
551 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
553 err = "inconsistent";
561 if (replicas < c->sb.meta_replicas_have) {
562 bch_bkey_val_to_text(c, btree_node_type(b),
563 buf, sizeof(buf), k);
565 "btree key bad (too few replicas, %u < %u): %s",
566 replicas, c->sb.meta_replicas_have, buf);
572 bch_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
573 cache_set_bug(c, "%s btree pointer %s: bucket %zi prio %i "
574 "gen %i last_gc %i mark %08x",
575 err, buf, PTR_BUCKET_NR(ca, ptr),
576 g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
577 ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
578 (unsigned) g->mark.counter);
582 static void bch_btree_ptr_to_text(struct cache_set *c, char *buf,
583 size_t size, struct bkey_s_c k)
585 char *out = buf, *end = buf + size;
588 #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
590 if (bkey_extent_is_data(k.k))
591 out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
593 invalid = bch_btree_ptr_invalid(c, k);
595 p(" invalid: %s", invalid);
599 struct extent_pick_ptr
600 bch_btree_pick_ptr(struct cache_set *c, const struct btree *b)
602 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
603 const union bch_extent_crc *crc;
604 const struct bch_extent_ptr *ptr;
609 extent_for_each_online_device_crc(c, e, crc, ptr, ca) {
610 struct btree *root = btree_node_root(c, b);
612 if (cache_set_inconsistent_on(crc, c,
613 "btree node pointer with crc at btree %u level %u/%u bucket %zu",
614 b->btree_id, b->level, root ? root->level : -1,
615 PTR_BUCKET_NR(ca, ptr)))
618 if (cache_inconsistent_on(ptr_stale(ca, ptr), ca,
619 "stale btree node pointer at btree %u level %u/%u bucket %zu",
620 b->btree_id, b->level, root ? root->level : -1,
621 PTR_BUCKET_NR(ca, ptr)))
624 percpu_ref_get(&ca->ref);
627 return (struct extent_pick_ptr) { .ptr = *ptr, .ca = ca };
632 return (struct extent_pick_ptr) { .ca = NULL, };
635 const struct bkey_ops bch_bkey_btree_ops = {
636 .key_invalid = bch_btree_ptr_invalid,
637 .key_debugcheck = btree_ptr_debugcheck,
638 .val_to_text = bch_btree_ptr_to_text,
639 .swab = bch_ptr_swab,
644 static bool __bch_cut_front(struct bpos where, struct bkey_s k)
648 if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
651 EBUG_ON(bkey_cmp(where, k.k->p) > 0);
653 len = k.k->p.offset - where.offset;
655 BUG_ON(len > k.k->size);
658 * Don't readjust offset if the key size is now 0, because that could
659 * cause offset to point to the next bucket:
662 __set_bkey_deleted(k.k);
663 else if (bkey_extent_is_data(k.k)) {
664 struct bkey_s_extent e = bkey_s_to_extent(k);
665 struct bch_extent_ptr *ptr;
666 union bch_extent_crc *crc, *prev_crc = NULL;
668 extent_for_each_ptr_crc(e, ptr, crc) {
669 switch (extent_crc_type(crc)) {
670 case BCH_EXTENT_CRC_NONE:
671 ptr->offset += e.k->size - len;
673 case BCH_EXTENT_CRC32:
675 crc->crc32.offset += e.k->size - len;
677 case BCH_EXTENT_CRC64:
679 crc->crc64.offset += e.k->size - len;
681 case BCH_EXTENT_CRC128:
683 crc->crc128.offset += e.k->size - len;
695 bool bch_cut_front(struct bpos where, struct bkey_i *k)
697 return __bch_cut_front(where, bkey_i_to_s(k));
700 bool bch_cut_back(struct bpos where, struct bkey *k)
704 if (bkey_cmp(where, k->p) >= 0)
707 EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
709 len = where.offset - bkey_start_offset(k);
711 BUG_ON(len > k->size);
717 __set_bkey_deleted(k);
723 * bch_key_resize - adjust size of @k
725 * bkey_start_offset(k) will be preserved, modifies where the extent ends
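*
* Example (illustrative, added): a key ending at offset 16 with size 8
* covers sectors [8, 16); bch_key_resize(k, 4) moves the end back to 12, so
* it covers [8, 12) - the start stays at 8.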
727 void bch_key_resize(struct bkey *k,
730 k->p.offset -= k->size;
731 k->p.offset += new_size;
736 * In extent_sort_fix_overlapping(), insert_fixup_extent(),
737 * extent_merge_inline() - we're modifying keys in place that are packed. To do
738 * that we have to unpack the key, modify the unpacked key - then this
739 * copies/repacks the unpacked to the original as necessary.
741 static bool __extent_save(struct btree *b, struct btree_node_iter *iter,
742 struct bkey_packed *dst, struct bkey *src)
744 struct bkey_format *f = &b->format;
745 struct bkey_i *dst_unpacked;
748 if ((dst_unpacked = packed_to_bkey(dst))) {
749 dst_unpacked->k = *src;
752 ret = bkey_pack_key(dst, src, f);
756 bch_verify_key_order(b, iter, dst);
761 static void extent_save(struct btree *b, struct btree_node_iter *iter,
762 struct bkey_packed *dst, struct bkey *src)
764 BUG_ON(!__extent_save(b, iter, dst, src));
768 * Returns true if l > r - unless l == r, in which case returns true if l is
771 * Necessary for sort_fix_overlapping() - if there are multiple keys that
772 * compare equal in different sets, we have to process them newest to oldest.
774 #define extent_sort_cmp(l, r) \
776 struct bkey _ul = bkey_unpack_key(b, \
777 __btree_node_offset_to_key(b, (l).k)); \
778 struct bkey _ur = bkey_unpack_key(b, \
779 __btree_node_offset_to_key(b, (r).k)); \
781 int _c = bkey_cmp(bkey_start_pos(&_ul), bkey_start_pos(&_ur)); \
782 _c ? _c > 0 : (l).k < (r).k; \
785 static inline void extent_sort_sift(struct btree_node_iter *iter,
786 struct btree *b, size_t i)
788 heap_sift(iter, i, extent_sort_cmp);
791 static inline void extent_sort_next(struct btree_node_iter *iter,
793 struct btree_node_iter_set *i)
795 sort_key_next(iter, b, i);
796 heap_sift(iter, i - iter->data, extent_sort_cmp);
799 static void extent_sort_append(struct cache_set *c,
801 struct btree_nr_keys *nr,
802 struct bkey_packed *start,
803 struct bkey_packed **prev,
804 struct bkey_packed *k)
806 struct bkey_format *f = &b->format;
809 if (bkey_whiteout(k))
812 bkey_unpack(b, &tmp.k, k);
815 bch_extent_merge(c, b, (void *) *prev, &tmp.k))
819 bkey_pack(*prev, (void *) *prev, f);
821 btree_keys_account_key_add(nr, 0, *prev);
822 *prev = bkey_next(*prev);
827 bkey_copy(*prev, &tmp.k);
830 struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c,
833 struct btree_node_iter *iter)
835 struct bkey_format *f = &b->format;
836 struct btree_node_iter_set *_l = iter->data, *_r;
837 struct bkey_packed *prev = NULL, *out, *lk, *rk;
838 struct bkey l_unpacked, r_unpacked;
840 struct btree_nr_keys nr;
842 memset(&nr, 0, sizeof(nr));
844 heap_resort(iter, extent_sort_cmp);
846 while (!bch_btree_node_iter_end(iter)) {
847 lk = __btree_node_offset_to_key(b, _l->k);
849 if (iter->used == 1) {
850 extent_sort_append(c, b, &nr, dst->start, &prev, lk);
851 extent_sort_next(iter, b, _l);
856 if (iter->used > 2 &&
857 extent_sort_cmp(_r[0], _r[1]))
860 rk = __btree_node_offset_to_key(b, _r->k);
862 l = __bkey_disassemble(b, lk, &l_unpacked);
863 r = __bkey_disassemble(b, rk, &r_unpacked);
865 /* If current key and next key don't overlap, just append */
866 if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
867 extent_sort_append(c, b, &nr, dst->start, &prev, lk);
868 extent_sort_next(iter, b, _l);
872 /* Skip 0 size keys */
874 extent_sort_next(iter, b, _r);
879 * overlap: keep the newer key and trim the older key so they
880 * don't overlap. comparing pointers tells us which one is
881 * newer, since the bsets are appended one after the other.
884 /* can't happen because of comparison func */
885 BUG_ON(_l->k < _r->k &&
886 !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));
890 if (bkey_cmp(l.k->p, r.k->p) >= 0) {
891 sort_key_next(iter, b, _r);
893 __bch_cut_front(l.k->p, r);
894 extent_save(b, NULL, rk, r.k);
897 extent_sort_sift(iter, b, _r - iter->data);
898 } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
902 * r wins, but it overlaps in the middle of l - split l:
904 bkey_reassemble(&tmp.k, l.s_c);
905 bch_cut_back(bkey_start_pos(r.k), &tmp.k.k);
907 __bch_cut_front(r.k->p, l);
908 extent_save(b, NULL, lk, l.k);
910 extent_sort_sift(iter, b, 0);
912 extent_sort_append(c, b, &nr, dst->start, &prev,
913 bkey_to_packed(&tmp.k));
915 bch_cut_back(bkey_start_pos(r.k), l.k);
916 extent_save(b, NULL, lk, l.k);
921 bkey_pack(prev, (void *) prev, f);
922 btree_keys_account_key_add(&nr, 0, prev);
923 out = bkey_next(prev);
928 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
932 struct extent_insert_state {
933 struct btree_insert *trans;
934 struct btree_insert_entry *insert;
935 struct bpos committed;
936 struct bucket_stats_cache_set stats;
939 struct bkey_i whiteout;
944 static void bch_add_sectors(struct extent_insert_state *s,
945 struct bkey_s_c k, u64 offset, s64 sectors)
947 struct cache_set *c = s->trans->c;
948 struct btree *b = s->insert->iter->nodes[0];
950 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
955 bch_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
956 &s->stats, s->trans->journal_res.seq);
958 if (bkey_extent_is_data(k.k) &&
959 !bkey_extent_is_cached(k.k))
960 bcache_dev_sectors_dirty_add(c, k.k->p.inode, offset, sectors);
963 static void bch_subtract_sectors(struct extent_insert_state *s,
964 struct bkey_s_c k, u64 offset, s64 sectors)
966 bch_add_sectors(s, k, offset, -sectors);
969 /* These wrappers subtract exactly the sectors that we're removing from @k */
970 static void bch_cut_subtract_back(struct extent_insert_state *s,
971 struct bpos where, struct bkey_s k)
973 bch_subtract_sectors(s, k.s_c, where.offset,
974 k.k->p.offset - where.offset);
975 bch_cut_back(where, k.k);
978 static void bch_cut_subtract_front(struct extent_insert_state *s,
979 struct bpos where, struct bkey_s k)
981 bch_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
982 where.offset - bkey_start_offset(k.k));
983 __bch_cut_front(where, k);
986 static void bch_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
989 bch_subtract_sectors(s, k.s_c,
990 bkey_start_offset(k.k), k.k->size);
992 __set_bkey_deleted(k.k);
996 * Note: If this returns true because only some pointers matched,
997 * we can lose some caching that had happened in the interim.
998 * Because cache promotion only promotes the part of the extent
999 * actually read, and not the whole extent, and due to the key
1000 * splitting done in bch_extent_insert_fixup, preserving such
1001 * caching is difficult.
1003 static bool bch_extent_cmpxchg_cmp(struct bkey_s_c l, struct bkey_s_c r)
1005 struct bkey_s_c_extent le, re;
1006 const struct bch_extent_ptr *lp, *rp;
1009 BUG_ON(!l.k->size || !r.k->size);
1011 if (l.k->type != r.k->type ||
1012 bversion_cmp(l.k->version, r.k->version))
1015 switch (l.k->type) {
1016 case KEY_TYPE_COOKIE:
1017 return !memcmp(bkey_s_c_to_cookie(l).v,
1018 bkey_s_c_to_cookie(r).v,
1019 sizeof(struct bch_cookie));
1022 case BCH_EXTENT_CACHED:
1023 le = bkey_s_c_to_extent(l);
1024 re = bkey_s_c_to_extent(r);
1027 * bkey_cmpxchg() handles partial matches - when either l or r
1028 * has been trimmed - so we need just to handle l or r not
1029 * starting at the same place when checking for a match here.
1031 * If the starts of the keys are different, we just apply that
1032 * offset to the device pointer offsets when checking those -
1033 * matching how bch_cut_front() adjusts device pointer offsets
1034 * when adjusting the start of a key:
1036 offset = bkey_start_offset(l.k) - bkey_start_offset(r.k);
1039 * XXX: perhaps we only raced with copygc or tiering replacing
1040 * one of the pointers: it should suffice to find _any_ matching
1044 if (bkey_val_u64s(le.k) != bkey_val_u64s(re.k))
1047 extent_for_each_ptr(le, lp) {
1048 const union bch_extent_entry *entry =
1049 vstruct_idx(re.v, (u64 *) lp - le.v->_data);
1051 if (!extent_entry_is_ptr(entry))
1056 if (lp->offset != rp->offset + offset ||
1057 lp->dev != rp->dev ||
1070 * Returns true on success, false on failure (and false means @new no longer
1073 * If returned true, we may have inserted up to one key in @b.
1074 * If returned false, we may have inserted up to two keys in @b.
1076 * On return, there is room in @res for at least one more key of the same size
1079 enum extent_insert_hook_ret bch_extent_cmpxchg(struct extent_insert_hook *hook,
1080 struct bpos committed_pos,
1081 struct bpos next_pos,
1083 const struct bkey_i *new)
1085 struct bch_replace_info *replace = container_of(hook,
1086 struct bch_replace_info, hook);
1087 struct bkey_i *old = &replace->key;
1089 EBUG_ON(bkey_cmp(committed_pos, bkey_start_pos(&new->k)) < 0);
1091 /* must have something to compare against */
1092 EBUG_ON(!bkey_val_u64s(&old->k));
1094 /* new must be a subset of old */
1095 EBUG_ON(bkey_cmp(new->k.p, old->k.p) > 0 ||
1096 bkey_cmp(bkey_start_pos(&new->k), bkey_start_pos(&old->k)) < 0);
1098 if (k.k && bch_extent_cmpxchg_cmp(k, bkey_i_to_s_c(old))) {
1099 replace->successes++;
1100 return BTREE_HOOK_DO_INSERT;
1102 replace->failures++;
1103 return BTREE_HOOK_NO_INSERT;
1107 static bool bch_extent_merge_inline(struct cache_set *,
1108 struct btree_iter *,
1109 struct bkey_packed *,
1110 struct bkey_packed *,
1113 #define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC)
1115 static enum btree_insert_ret
1116 extent_insert_should_stop(struct extent_insert_state *s)
1118 struct btree *b = s->insert->iter->nodes[0];
1121 * Check if we have sufficient space in both the btree node and the
1122 * journal reservation:
1124 * Each insert checks for room in the journal entry, but we check for
1125 * room in the btree node up-front. In the worst case, bkey_cmpxchg()
1126 * will insert two keys, and one iteration of this loop will insert one
1127 * key, so we need room for three keys.
1129 if (!bch_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
1130 return BTREE_INSERT_BTREE_NODE_FULL;
1131 else if (!journal_res_insert_fits(s->trans, s->insert))
1132 return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */
1134 return BTREE_INSERT_OK;
1137 static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter,
1138 struct bkey_i *insert)
1140 struct btree *b = iter->nodes[0];
1141 struct btree_node_iter *node_iter = &iter->node_iters[0];
1142 struct bset_tree *t = bset_tree_last(b);
1143 struct bkey_packed *where =
1144 bch_btree_node_iter_bset_pos(node_iter, b, t);
1145 struct bkey_packed *prev = bkey_prev(b, t, where);
1146 struct bkey_packed *next_live_key = where;
1147 unsigned clobber_u64s;
1150 where = bkey_next(prev);
1152 while (next_live_key != btree_bkey_last(b, t) &&
1153 bkey_deleted(next_live_key))
1154 next_live_key = bkey_next(next_live_key);
1157 * Everything between where and next_live_key is now deleted keys, and
1160 clobber_u64s = (u64 *) next_live_key - (u64 *) where;
1163 bch_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
1164 goto drop_deleted_keys;
1166 if (next_live_key != btree_bkey_last(b, t) &&
1167 bch_extent_merge_inline(c, iter, bkey_to_packed(insert),
1168 next_live_key, false))
1169 goto drop_deleted_keys;
1171 bch_bset_insert(b, node_iter, where, insert, clobber_u64s);
1172 bch_btree_node_iter_fix(iter, b, node_iter, t, where,
1173 clobber_u64s, where->u64s);
1176 bch_bset_delete(b, where, clobber_u64s);
1177 bch_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0);
1180 static void extent_insert_committed(struct extent_insert_state *s)
1182 struct cache_set *c = s->trans->c;
1183 struct btree_iter *iter = s->insert->iter;
1184 struct bkey_i *insert = !s->deleting
1187 BKEY_PADDED(k) split;
1189 EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
1190 EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
1192 if (!bkey_cmp(s->committed, bkey_start_pos(&insert->k)))
1195 if (s->deleting && !s->do_journal) {
1196 bch_cut_front(s->committed, insert);
1200 EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
1202 bkey_copy(&split.k, insert);
1204 if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY) &&
1205 bkey_cmp(s->committed, insert->k.p) &&
1206 bkey_extent_is_compressed(bkey_i_to_s_c(insert))) {
1207 /* XXX: possibly need to increase our reservation? */
1208 bch_cut_subtract_back(s, s->committed,
1209 bkey_i_to_s(&split.k));
1210 bch_cut_front(s->committed, insert);
1211 bch_add_sectors(s, bkey_i_to_s_c(insert),
1212 bkey_start_offset(&insert->k),
1215 bch_cut_back(s->committed, &split.k.k);
1216 bch_cut_front(s->committed, insert);
1219 if (debug_check_bkeys(c))
1220 bkey_debugcheck(c, iter->nodes[iter->level],
1221 bkey_i_to_s_c(&split.k));
1223 bch_btree_journal_key(s->trans, iter, &split.k);
1226 extent_bset_insert(c, iter, &split.k);
1228 bch_btree_iter_set_pos_same_leaf(iter, s->committed);
1230 insert->k.needs_whiteout = false;
1231 s->do_journal = false;
1232 s->trans->did_work = true;
1235 static enum extent_insert_hook_ret
1236 __extent_insert_advance_pos(struct extent_insert_state *s,
1237 struct bpos next_pos,
1240 struct extent_insert_hook *hook = s->trans->hook;
1241 enum extent_insert_hook_ret ret;
1244 * Currently disabled for encryption - broken with fcollapse. Will have
1245 * to reenable when versions are exposed for send/receive - versions
1246 * will have to be monotonic then:
1248 if (k.k && k.k->size &&
1249 !bversion_zero(s->insert->k->k.version) &&
1250 bversion_cmp(k.k->version, s->insert->k->k.version) > 0) {
1251 ret = BTREE_HOOK_NO_INSERT;
1255 ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
1257 ret = BTREE_HOOK_DO_INSERT;
1259 EBUG_ON(bkey_deleted(&s->insert->k->k) || !s->insert->k->k.size);
1262 case BTREE_HOOK_DO_INSERT:
1264 case BTREE_HOOK_NO_INSERT:
1265 extent_insert_committed(s);
1266 bch_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k));
1268 bch_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos);
1270 case BTREE_HOOK_RESTART_TRANS:
1274 s->committed = next_pos;
1279 * Update iter->pos, marking how much of @insert we've processed, and call hook
1282 static enum extent_insert_hook_ret
1283 extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
1285 struct btree *b = s->insert->iter->nodes[0];
1286 struct bpos next_pos = bpos_min(s->insert->k->k.p,
1287 k.k ? k.k->p : b->key.k.p);
1290 if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
1291 bool have_uncommitted = bkey_cmp(s->committed,
1292 bkey_start_pos(&s->insert->k->k)) > 0;
1294 switch (__extent_insert_advance_pos(s, bkey_start_pos(k.k),
1296 case BTREE_HOOK_DO_INSERT:
1298 case BTREE_HOOK_NO_INSERT:
1300 * we had to split @insert and insert the committed
1301 * part - need to bail out and recheck journal
1302 * reservation/btree node before we advance pos past @k:
1304 if (have_uncommitted)
1305 return BTREE_HOOK_NO_INSERT;
1307 case BTREE_HOOK_RESTART_TRANS:
1308 return BTREE_HOOK_RESTART_TRANS;
1312 /* avoid redundant calls to hook fn: */
1313 if (!bkey_cmp(s->committed, next_pos))
1314 return BTREE_HOOK_DO_INSERT;
1316 return __extent_insert_advance_pos(s, next_pos, k);
1319 static enum btree_insert_ret
1320 extent_insert_check_split_compressed(struct extent_insert_state *s,
1322 enum bch_extent_overlap overlap)
1324 struct cache_set *c = s->trans->c;
1327 if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
1328 (sectors = bkey_extent_is_compressed(k))) {
1329 int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
1331 if (s->trans->flags & BTREE_INSERT_NOFAIL)
1332 flags |= BCH_DISK_RESERVATION_NOFAIL;
1334 switch (bch_disk_reservation_add(c,
1340 return BTREE_INSERT_ENOSPC;
1342 return BTREE_INSERT_NEED_GC_LOCK;
1348 return BTREE_INSERT_OK;
1351 static enum btree_insert_ret
1352 extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
1353 struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
1354 enum bch_extent_overlap overlap)
1356 struct cache_set *c = s->trans->c;
1357 struct btree_iter *iter = s->insert->iter;
1358 struct btree *b = iter->nodes[0];
1359 struct btree_node_iter *node_iter = &iter->node_iters[0];
1362 case BCH_EXTENT_OVERLAP_FRONT:
1363 /* insert overlaps with start of k: */
1364 bch_cut_subtract_front(s, insert->k.p, k);
1365 BUG_ON(bkey_deleted(k.k));
1366 extent_save(b, node_iter, _k, k.k);
1369 case BCH_EXTENT_OVERLAP_BACK:
1370 /* insert overlaps with end of k: */
1371 bch_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
1372 BUG_ON(bkey_deleted(k.k));
1373 extent_save(b, node_iter, _k, k.k);
1376 * As the auxiliary tree is indexed by the end of the
1377 * key and we've just changed the end, update the
1380 bch_bset_fix_invalidated_key(b, t, _k);
1381 bch_btree_node_iter_fix(iter, b, node_iter, t,
1382 _k, _k->u64s, _k->u64s);
1385 case BCH_EXTENT_OVERLAP_ALL: {
1386 struct bpos orig_pos = k.k->p;
1388 /* The insert key completely covers k, invalidate k */
1389 if (!bkey_whiteout(k.k))
1390 btree_keys_account_key_drop(&b->nr,
1393 bch_drop_subtract(s, k);
1394 k.k->p = bkey_start_pos(&insert->k);
1395 if (!__extent_save(b, node_iter, _k, k.k)) {
1397 * Couldn't repack: we aren't necessarily able
1398 * to repack if the new key is outside the range
1399 * of the old extent, so we have to split
1403 extent_save(b, node_iter, _k, k.k);
1405 if (extent_insert_advance_pos(s, k.s_c) ==
1406 BTREE_HOOK_RESTART_TRANS)
1407 return BTREE_INSERT_NEED_TRAVERSE;
1409 extent_insert_committed(s);
1411 * We split and inserted up to k.k->p - that
1412 * has to coincide with iter->pos, so that we
1413 * don't have anything more we have to insert
1414 * until we recheck our journal reservation:
1416 EBUG_ON(bkey_cmp(s->committed, k.k->p));
1418 bch_bset_fix_invalidated_key(b, t, _k);
1419 bch_btree_node_iter_fix(iter, b, node_iter, t,
1420 _k, _k->u64s, _k->u64s);
1425 case BCH_EXTENT_OVERLAP_MIDDLE: {
1426 BKEY_PADDED(k) split;
1428 * The insert key falls 'in the middle' of k
1429 * The insert key splits k in 3:
1430 * - start only in k, preserve
1431 * - middle common section, invalidate in k
1432 * - end only in k, preserve
1434 * We update the old key to preserve the start,
1435 * insert will be the new common section,
1436 * we manually insert the end that we are preserving.
1438 * modify k _before_ doing the insert (which will move
1441 bkey_reassemble(&split.k, k.s_c);
1442 split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
1444 bch_cut_back(bkey_start_pos(&insert->k), &split.k.k);
1445 BUG_ON(bkey_deleted(&split.k.k));
1447 bch_cut_subtract_front(s, insert->k.p, k);
1448 BUG_ON(bkey_deleted(k.k));
1449 extent_save(b, node_iter, _k, k.k);
1451 bch_add_sectors(s, bkey_i_to_s_c(&split.k),
1452 bkey_start_offset(&split.k.k),
1454 extent_bset_insert(c, iter, &split.k);
1459 return BTREE_INSERT_OK;
1462 static enum btree_insert_ret
1463 bch_delete_fixup_extent(struct extent_insert_state *s)
1465 struct cache_set *c = s->trans->c;
1466 struct btree_iter *iter = s->insert->iter;
1467 struct btree *b = iter->nodes[0];
1468 struct btree_node_iter *node_iter = &iter->node_iters[0];
1469 struct bkey_packed *_k;
1470 struct bkey unpacked;
1471 struct bkey_i *insert = s->insert->k;
1472 enum btree_insert_ret ret = BTREE_INSERT_OK;
1474 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
1476 s->whiteout = *insert;
1477 s->do_journal = false;
1479 while (bkey_cmp(s->committed, insert->k.p) < 0 &&
1480 (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
1481 (_k = bch_btree_node_iter_peek_all(node_iter, b))) {
1482 struct bset_tree *t = bch_bkey_to_bset(b, _k);
1483 struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
1484 enum bch_extent_overlap overlap;
1486 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
1487 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1489 if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
1492 if (bkey_whiteout(k.k)) {
1493 s->committed = bpos_min(insert->k.p, k.k->p);
1497 overlap = bch_extent_overlap(&insert->k, k.k);
1499 ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
1500 if (ret != BTREE_INSERT_OK)
1503 switch (extent_insert_advance_pos(s, k.s_c)) {
1504 case BTREE_HOOK_DO_INSERT:
1506 case BTREE_HOOK_NO_INSERT:
1508 case BTREE_HOOK_RESTART_TRANS:
1509 ret = BTREE_INSERT_NEED_TRAVERSE;
1513 s->do_journal = true;
1515 if (overlap == BCH_EXTENT_OVERLAP_ALL) {
1516 btree_keys_account_key_drop(&b->nr,
1518 bch_subtract_sectors(s, k.s_c,
1519 bkey_start_offset(k.k), k.k->size);
1520 _k->type = KEY_TYPE_DISCARD;
1521 reserve_whiteout(b, t, _k);
1522 } else if (k.k->needs_whiteout ||
1523 bset_written(b, bset(b, t))) {
1524 struct bkey_i discard = *insert;
1527 case BCH_EXTENT_OVERLAP_FRONT:
1528 bch_cut_front(bkey_start_pos(k.k), &discard);
1530 case BCH_EXTENT_OVERLAP_BACK:
1531 bch_cut_back(k.k->p, &discard.k);
1537 discard.k.needs_whiteout = true;
1539 ret = extent_squash(s, insert, t, _k, k, overlap);
1540 BUG_ON(ret != BTREE_INSERT_OK);
1542 extent_bset_insert(c, iter, &discard);
1544 ret = extent_squash(s, insert, t, _k, k, overlap);
1545 BUG_ON(ret != BTREE_INSERT_OK);
1548 bch_cut_front(s->committed, insert);
1549 bch_btree_iter_set_pos_same_leaf(iter, s->committed);
1552 if (bkey_cmp(s->committed, insert->k.p) < 0 &&
1553 ret == BTREE_INSERT_OK &&
1554 extent_insert_advance_pos(s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
1555 ret = BTREE_INSERT_NEED_TRAVERSE;
1557 extent_insert_committed(s);
1559 bch_cache_set_stats_apply(c, &s->stats, s->trans->disk_res,
1560 gc_pos_btree_node(b));
1562 EBUG_ON(bkey_cmp(iter->pos, s->committed));
1563 EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) != iter->at_end_of_leaf);
1565 bch_cut_front(iter->pos, insert);
1567 if (insert->k.size && iter->at_end_of_leaf)
1568 ret = BTREE_INSERT_NEED_TRAVERSE;
1570 EBUG_ON(insert->k.size && ret == BTREE_INSERT_OK);
1576 * bch_extent_insert_fixup - insert a new extent and deal with overlaps
1578 * this may result in not actually doing the insert, or inserting some subset
1579 * of the insert key. For cmpxchg operations this is where that logic lives.
1581 * All subsets of @insert that need to be inserted are inserted using
1582 * bch_btree_insert_and_journal(). If @b or @res fills up, this function
1583 * returns false, setting @iter->pos for the prefix of @insert that actually got
1586 * BSET INVARIANTS: this function is responsible for maintaining all the
1587 * invariants for bsets of extents in memory. things get really hairy with 0
1592 * bkey_start_pos(bkey_next(k)) >= k
1593 * or bkey_start_offset(bkey_next(k)) >= k->offset
1595 * i.e. strict ordering, no overlapping extents.
1597 * multiple bsets (i.e. full btree node):
1600 * k.size != 0 ∧ j.size != 0 →
1601 * ¬ (k > bkey_start_pos(j) ∧ k < j)
1603 * i.e. no two overlapping keys _of nonzero size_
1605 * We can't realistically maintain this invariant for zero size keys because of
1606 * the key merging done in bch_btree_insert_key() - for two mergeable keys k, j
1607 * there may be another 0 size key between them in another bset, and it will
1608 * thus overlap with the merged key.
1610 * In addition, the end of iter->pos indicates how much has been processed.
1611 * If the end of iter->pos is not the same as the end of insert, then
1612 * key insertion needs to continue/be retried.
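*
* For reference (diagram added here, not part of the original comment), the
* overlap cases classified by bch_extent_overlap() and handled below, for an
* existing key k and the new @insert:
*
*   FRONT:   k:          |--------|     @insert overlaps the start of k
*            insert:  |------|
*   BACK:    k:       |--------|        @insert overlaps the end of k
*            insert:        |------|
*   ALL:     k:          |----|         @insert completely covers k
*            insert:   |----------|
*   MIDDLE:  k:       |----------|      @insert splits k in three
*            insert:      |--|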
1614 enum btree_insert_ret
1615 bch_insert_fixup_extent(struct btree_insert *trans,
1616 struct btree_insert_entry *insert)
1618 struct cache_set *c = trans->c;
1619 struct btree_iter *iter = insert->iter;
1620 struct btree *b = iter->nodes[0];
1621 struct btree_node_iter *node_iter = &iter->node_iters[0];
1622 struct bkey_packed *_k;
1623 struct bkey unpacked;
1624 enum btree_insert_ret ret = BTREE_INSERT_OK;
1626 struct extent_insert_state s = {
1629 .committed = insert->iter->pos,
1630 .deleting = bkey_whiteout(&insert->k->k),
1633 EBUG_ON(iter->level);
1634 EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);
1637 return bch_delete_fixup_extent(&s);
1640 * As we process overlapping extents, we advance @iter->pos both to
1641 * signal to our caller (btree_insert_key()) how much of @insert->k has
1642 * been inserted, and also to keep @iter->pos consistent with
1643 * @insert->k and the node iterator that we're advancing:
1645 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1647 if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1648 bch_add_sectors(&s, bkey_i_to_s_c(insert->k),
1649 bkey_start_offset(&insert->k->k),
1652 while (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
1653 (ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK &&
1654 (_k = bch_btree_node_iter_peek_all(node_iter, b))) {
1655 struct bset_tree *t = bch_bkey_to_bset(b, _k);
1656 struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
1657 enum bch_extent_overlap overlap;
1659 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1660 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1662 if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
1665 overlap = bch_extent_overlap(&insert->k->k, k.k);
1667 ret = extent_insert_check_split_compressed(&s, k.s_c, overlap);
1668 if (ret != BTREE_INSERT_OK)
1675 * Only call advance pos & call hook for nonzero size extents:
1676 * If hook returned BTREE_HOOK_NO_INSERT, @insert->k no longer
1679 switch (extent_insert_advance_pos(&s, k.s_c)) {
1680 case BTREE_HOOK_DO_INSERT:
1682 case BTREE_HOOK_NO_INSERT:
1684 case BTREE_HOOK_RESTART_TRANS:
1685 ret = BTREE_INSERT_NEED_TRAVERSE;
1690 (k.k->needs_whiteout || bset_written(b, bset(b, t))))
1691 insert->k->k.needs_whiteout = true;
1693 if (overlap == BCH_EXTENT_OVERLAP_ALL &&
1694 bkey_whiteout(k.k) &&
1695 k.k->needs_whiteout) {
1696 unreserve_whiteout(b, t, _k);
1697 _k->needs_whiteout = false;
1700 ret = extent_squash(&s, insert->k, t, _k, k, overlap);
1701 if (ret != BTREE_INSERT_OK)
1705 if (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
1706 ret == BTREE_INSERT_OK &&
1707 extent_insert_advance_pos(&s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
1708 ret = BTREE_INSERT_NEED_TRAVERSE;
1710 extent_insert_committed(&s);
1712 * Subtract any remaining sectors from @insert, if we bailed out early
1713 * and didn't fully insert @insert:
1715 if (insert->k->k.size &&
1716 !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1717 bch_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
1718 bkey_start_offset(&insert->k->k),
1721 bch_cache_set_stats_apply(c, &s.stats, trans->disk_res,
1722 gc_pos_btree_node(b));
1724 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1725 EBUG_ON(bkey_cmp(iter->pos, s.committed));
1726 EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) != iter->at_end_of_leaf);
1728 if (insert->k->k.size && iter->at_end_of_leaf)
1729 ret = BTREE_INSERT_NEED_TRAVERSE;
1731 EBUG_ON(insert->k->k.size && ret == BTREE_INSERT_OK);
1736 static const char *bch_extent_invalid(const struct cache_set *c,
1739 if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
1740 return "value too big";
1743 return "zero key size";
1745 switch (k.k->type) {
1747 case BCH_EXTENT_CACHED: {
1748 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1749 const union bch_extent_entry *entry;
1750 const union bch_extent_crc *crc;
1751 const struct bch_extent_ptr *ptr;
1752 struct cache_member_rcu *mi = cache_member_info_get(c);
1753 unsigned size_ondisk = e.k->size;
1756 extent_for_each_entry(e, entry) {
1757 reason = "invalid extent entry type";
1758 if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
1761 if (extent_entry_is_crc(entry)) {
1762 crc = entry_to_crc(entry);
1764 reason = "checksum offset + key size > uncompressed size";
1765 if (crc_offset(crc) + e.k->size >
1766 crc_uncompressed_size(e.k, crc))
1769 size_ondisk = crc_compressed_size(e.k, crc);
1771 reason = "invalid checksum type";
1772 if (!bch_checksum_type_valid(c, crc_csum_type(crc)))
1775 reason = "invalid compression type";
1776 if (crc_compression_type(crc) >= BCH_COMPRESSION_NR)
1779 ptr = entry_to_ptr(entry);
1781 reason = extent_ptr_invalid(e, mi,
1782 &entry->ptr, size_ondisk);
1788 cache_member_info_put();
1791 cache_member_info_put();
1795 case BCH_RESERVATION: {
1796 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
1798 if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
1799 return "incorrect value size";
1801 if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
1802 return "invalid nr_replicas";
1808 return "invalid value type";
1812 static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
1813 struct bkey_s_c_extent e)
1815 const struct bch_extent_ptr *ptr;
1816 struct cache_member_rcu *mi;
1819 unsigned seq, stale;
1822 unsigned ptrs_per_tier[BCH_TIER_MAX];
1823 unsigned tier, replicas = 0;
1826 * XXX: we should be doing most/all of these checks at startup time,
1827 * where we check bkey_invalid() in btree_node_read_done()
1829 * But note that we can't check for stale pointers or incorrect gc marks
1830 * until after journal replay is done (it might be an extent that's
1831 * going to get overwritten during replay)
1834 memset(ptrs_per_tier, 0, sizeof(ptrs_per_tier));
1836 mi = cache_member_info_get(c);
1838 extent_for_each_ptr(e, ptr) {
1841 if (ptr->dev >= mi->nr_devices)
1845 * If journal replay hasn't finished, we might be seeing keys
1846 * that will be overwritten by the time journal replay is done:
1848 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1851 if (!mi->m[ptr->dev].valid)
1854 tier = mi->m[ptr->dev].tier;
1855 ptrs_per_tier[tier]++;
1859 if ((ca = PTR_CACHE(c, ptr))) {
1860 g = PTR_BUCKET(ca, ptr);
1863 struct bucket_mark mark;
1865 seq = read_seqcount_begin(&c->gc_pos_lock);
1866 mark = READ_ONCE(g->mark);
1868 /* between mark and bucket gen */
1871 stale = ptr_stale(ca, ptr);
1873 cache_set_bug_on(stale && !ptr->cached, c,
1874 "stale dirty pointer");
1876 cache_set_bug_on(stale > 96, c,
1877 "key too stale: %i",
1883 bad = (mark.is_metadata ||
1884 (gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
1885 !mark.owned_by_allocator &&
1887 ? mark.cached_sectors
1888 : mark.dirty_sectors)));
1889 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
1895 cache_member_info_put();
1897 if (replicas > BCH_REPLICAS_MAX) {
1898 bch_bkey_val_to_text(c, btree_node_type(b), buf,
1899 sizeof(buf), e.s_c);
1901 "extent key bad (too many replicas: %u): %s",
1906 if (!bkey_extent_is_cached(e.k) &&
1907 replicas < c->sb.data_replicas_have) {
1908 bch_bkey_val_to_text(c, btree_node_type(b), buf,
1909 sizeof(buf), e.s_c);
1911 "extent key bad (too few replicas, %u < %u): %s",
1912 replicas, c->sb.data_replicas_have, buf);
1919 bch_bkey_val_to_text(c, btree_node_type(b), buf,
1920 sizeof(buf), e.s_c);
1921 cache_set_bug(c, "extent pointer to dev %u missing device: %s",
1923 cache_member_info_put();
1927 bch_bkey_val_to_text(c, btree_node_type(b), buf,
1928 sizeof(buf), e.s_c);
1929 cache_set_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i "
1930 "gen %i last_gc %i mark 0x%08x",
1931 buf, PTR_BUCKET_NR(ca, ptr),
1932 g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
1933 ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
1934 (unsigned) g->mark.counter);
1935 cache_member_info_put();
1939 static void bch_extent_debugcheck(struct cache_set *c, struct btree *b,
1942 switch (k.k->type) {
1944 case BCH_EXTENT_CACHED:
1945 bch_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
1947 case BCH_RESERVATION:
1954 static void bch_extent_to_text(struct cache_set *c, char *buf,
1955 size_t size, struct bkey_s_c k)
1957 char *out = buf, *end = buf + size;
1958 const char *invalid;
1960 #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
1962 if (bkey_extent_is_data(k.k))
1963 out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
1965 invalid = bch_extent_invalid(c, k);
1967 p(" invalid: %s", invalid);
1971 static unsigned PTR_TIER(struct cache_member_rcu *mi,
1972 const struct bch_extent_ptr *ptr)
1974 return ptr->dev < mi->nr_devices
1975 ? mi->m[ptr->dev].tier
1979 static void bch_extent_crc_init(union bch_extent_crc *crc,
1980 unsigned compressed_size,
1981 unsigned uncompressed_size,
1982 unsigned compression_type,
1984 struct bch_csum csum, unsigned csum_type)
1986 if (bch_crc_bytes[csum_type] <= 4 &&
1987 uncompressed_size <= CRC32_SIZE_MAX &&
1988 nonce <= CRC32_NONCE_MAX) {
1989 crc->crc32 = (struct bch_extent_crc32) {
1990 .type = 1 << BCH_EXTENT_ENTRY_crc32,
1991 ._compressed_size = compressed_size - 1,
1992 ._uncompressed_size = uncompressed_size - 1,
1994 .compression_type = compression_type,
1995 .csum_type = csum_type,
1996 .csum = *((__le32 *) &csum.lo),
2001 if (bch_crc_bytes[csum_type] <= 10 &&
2002 uncompressed_size <= CRC64_SIZE_MAX &&
2003 nonce <= CRC64_NONCE_MAX) {
2004 crc->crc64 = (struct bch_extent_crc64) {
2005 .type = 1 << BCH_EXTENT_ENTRY_crc64,
2006 ._compressed_size = compressed_size - 1,
2007 ._uncompressed_size = uncompressed_size - 1,
2010 .compression_type = compression_type,
2011 .csum_type = csum_type,
2013 .csum_hi = *((__le16 *) &csum.hi),
2018 if (bch_crc_bytes[csum_type] <= 16 &&
2019 uncompressed_size <= CRC128_SIZE_MAX &&
2020 nonce <= CRC128_NONCE_MAX) {
2021 crc->crc128 = (struct bch_extent_crc128) {
2022 .type = 1 << BCH_EXTENT_ENTRY_crc128,
2023 ._compressed_size = compressed_size - 1,
2024 ._uncompressed_size = uncompressed_size - 1,
2027 .compression_type = compression_type,
2028 .csum_type = csum_type,
2037 void bch_extent_crc_append(struct bkey_i_extent *e,
2038 unsigned compressed_size,
2039 unsigned uncompressed_size,
2040 unsigned compression_type,
2042 struct bch_csum csum, unsigned csum_type)
2044 union bch_extent_crc *crc;
2046 BUG_ON(compressed_size > uncompressed_size);
2047 BUG_ON(uncompressed_size != e->k.size);
2048 BUG_ON(!compressed_size || !uncompressed_size);
2051 * Look up the last crc entry, so we can check if we need to add
2054 extent_for_each_crc(extent_i_to_s(e), crc)
2057 if (!crc && !csum_type && !compression_type)
2061 crc_compressed_size(&e->k, crc) == compressed_size &&
2062 crc_uncompressed_size(&e->k, crc) == uncompressed_size &&
2063 crc_offset(crc) == 0 &&
2064 crc_nonce(crc) == nonce &&
2065 crc_csum_type(crc) == csum_type &&
2066 crc_compression_type(crc) == compression_type &&
2067 crc_csum(crc).lo == csum.lo &&
2068 crc_csum(crc).hi == csum.hi)
2071 bch_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)),
2075 nonce, csum, csum_type);
2076 __extent_entry_push(e);
2080 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
2082 * Returns true if @k should be dropped entirely
2084 * For existing keys, only called when btree nodes are being rewritten, not when
2085 * they're merely being compacted/resorted in memory.
2087 bool bch_extent_normalize(struct cache_set *c, struct bkey_s k)
2089 struct bkey_s_extent e;
2091 switch (k.k->type) {
2092 case KEY_TYPE_ERROR:
2095 case KEY_TYPE_DELETED:
2096 case KEY_TYPE_COOKIE:
2099 case KEY_TYPE_DISCARD:
2100 return bversion_zero(k.k->version);
2103 case BCH_EXTENT_CACHED:
2104 e = bkey_s_to_extent(k);
2106 bch_extent_drop_stale(c, e);
2108 if (!bkey_val_u64s(e.k)) {
2109 if (bkey_extent_is_cached(e.k)) {
2110 k.k->type = KEY_TYPE_DISCARD;
2111 if (bversion_zero(k.k->version))
2114 k.k->type = KEY_TYPE_ERROR;
2119 case BCH_RESERVATION:
2126 void bch_extent_mark_replicas_cached(struct cache_set *c,
2127 struct bkey_s_extent e,
2130 struct bch_extent_ptr *ptr;
2131 struct cache_member_rcu *mi;
2132 bool have_higher_tier;
2138 mi = cache_member_info_get(c);
2141 have_higher_tier = false;
2143 extent_for_each_ptr(e, ptr) {
2145 PTR_TIER(mi, ptr) == tier) {
2152 if (PTR_TIER(mi, ptr) > tier)
2153 have_higher_tier = true;
2157 } while (have_higher_tier);
2159 cache_member_info_put();
2163 * This picks a non-stale pointer, preferably from a device other than
2164 * avoid. Avoid can be NULL, meaning pick any. If there are no non-stale
2165 * pointers to other devices, it will still pick a pointer from avoid.
2166 * Note that it prefers lower-numbered pointers to higher-numbered pointers,
2167 * as the pointers are sorted by tier, hence preferring pointers to tier 0
2168 * rather than pointers to tier 1.
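*
* Example (illustrative, added): for an extent with one pointer to a tier 0
* device and one to a tier 1 device, called with @avoid set to the tier 0
* device, the tier 1 pointer is returned if it isn't stale; if it is stale,
* we fall back to the tier 0 pointer. With @avoid == NULL the tier 0
* pointer is preferred.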
2170 void bch_extent_pick_ptr_avoiding(struct cache_set *c, struct bkey_s_c k,
2171 struct cache *avoid,
2172 struct extent_pick_ptr *ret)
2174 struct bkey_s_c_extent e;
2175 const union bch_extent_crc *crc;
2176 const struct bch_extent_ptr *ptr;
2179 switch (k.k->type) {
2180 case KEY_TYPE_DELETED:
2181 case KEY_TYPE_DISCARD:
2182 case KEY_TYPE_COOKIE:
2186 case KEY_TYPE_ERROR:
2187 ret->ca = ERR_PTR(-EIO);
2191 case BCH_EXTENT_CACHED:
2192 e = bkey_s_c_to_extent(k);
2196 extent_for_each_online_device_crc(c, e, crc, ptr, ca)
2197 if (!ptr_stale(ca, ptr)) {
2198 *ret = (struct extent_pick_ptr) {
2199 .crc = crc_to_128(e.k, crc),
2209 percpu_ref_get(&ret->ca->ref);
2210 else if (!bkey_extent_is_cached(e.k))
2211 ret->ca = ERR_PTR(-EIO);
2216 case BCH_RESERVATION:
2225 static enum merge_result bch_extent_merge(struct cache_set *c,
2227 struct bkey_i *l, struct bkey_i *r)
2229 struct bkey_s_extent el, er;
2230 union bch_extent_entry *en_l, *en_r;
2232 if (key_merging_disabled(c))
2233 return BCH_MERGE_NOMERGE;
2236 * Generic header checks
2237 * Assumes left and right are in order
2238 * Left and right must be exactly aligned
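*
* Illustrative example (added, not from the original): l covering sectors
* [0, 8) and r covering [8, 16) of the same inode, each with one pointer to
* the same device where r's pointer offset is exactly l's offset + 8 and
* both offsets fall within the same bucket, merge into a single extent
* covering [0, 16). Anything failing these checks returns BCH_MERGE_NOMERGE.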
2241 if (l->k.u64s != r->k.u64s ||
2242 l->k.type != r->k.type ||
2243 bversion_cmp(l->k.version, r->k.version) ||
2244 bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
2245 return BCH_MERGE_NOMERGE;
2247 switch (l->k.type) {
2248 case KEY_TYPE_DELETED:
2249 case KEY_TYPE_DISCARD:
2250 case KEY_TYPE_ERROR:
2251 /* These types are mergeable, and no val to check */
2255 case BCH_EXTENT_CACHED:
2256 el = bkey_i_to_s_extent(l);
2257 er = bkey_i_to_s_extent(r);
2259 extent_for_each_entry(el, en_l) {
2260 struct bch_extent_ptr *lp, *rp;
2261 struct cache_member_cpu *m;
2263 en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
2265 if ((extent_entry_type(en_l) !=
2266 extent_entry_type(en_r)) ||
2267 extent_entry_is_crc(en_l))
2268 return BCH_MERGE_NOMERGE;
2273 if (lp->offset + el.k->size != rp->offset ||
2274 lp->dev != rp->dev ||
2276 return BCH_MERGE_NOMERGE;
2278 /* We don't allow extents to straddle buckets: */
2280 m = cache_member_info_get(c)->m + lp->dev;
2281 if ((lp->offset & ~((u64) m->bucket_size - 1)) !=
2282 (rp->offset & ~((u64) m->bucket_size - 1))) {
2283 cache_member_info_put();
2284 return BCH_MERGE_NOMERGE;
2287 cache_member_info_put();
2291 case BCH_RESERVATION: {
2292 struct bkey_i_reservation *li = bkey_i_to_reservation(l);
2293 struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
2295 if (li->v.generation != ri->v.generation ||
2296 li->v.nr_replicas != ri->v.nr_replicas)
2297 return BCH_MERGE_NOMERGE;
2301 return BCH_MERGE_NOMERGE;
2304 l->k.needs_whiteout |= r->k.needs_whiteout;
2306 /* Keys with no pointers aren't restricted to one bucket and could
2309 if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
2310 bch_key_resize(&l->k, KEY_SIZE_MAX);
2311 bch_cut_front(l->k.p, r);
2312 return BCH_MERGE_PARTIAL;
2315 bch_key_resize(&l->k, l->k.size + r->k.size);
2317 return BCH_MERGE_MERGE;
2320 static void extent_i_save(struct btree *b, struct bkey_packed *dst,
2323 struct bkey_format *f = &b->format;
2324 struct bkey_i *dst_unpacked;
2326 BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k));
2329 * We don't want the bch_verify_key_order() call in extent_save(),
2330 * because we may be out of order with deleted keys that are about to be
2331 * removed by extent_bset_insert()
2334 if ((dst_unpacked = packed_to_bkey(dst)))
2335 bkey_copy(dst_unpacked, src);
2337 BUG_ON(!bkey_pack(dst, src, f));
2340 static bool extent_merge_one_overlapping(struct btree_iter *iter,
2341 struct bpos new_pos,
2342 struct bset_tree *t,
2343 struct bkey_packed *k, struct bkey uk,
2344 bool check, bool could_pack)
2346 struct btree *b = iter->nodes[0];
2347 struct btree_node_iter *node_iter = &iter->node_iters[0];
2349 BUG_ON(!bkey_deleted(k));
2352 return !bkey_packed(k) || could_pack;
2355 extent_save(b, node_iter, k, &uk);
2356 bch_bset_fix_invalidated_key(b, t, k);
2357 bch_btree_node_iter_fix(iter, b, node_iter, t,
2358 k, k->u64s, k->u64s);
2363 static bool extent_merge_do_overlapping(struct btree_iter *iter,
2364 struct bkey *m, bool back_merge)
2366 struct btree *b = iter->nodes[0];
2367 struct btree_node_iter *node_iter = &iter->node_iters[0];
2368 struct bset_tree *t;
2369 struct bkey_packed *k;
2371 struct bpos new_pos = back_merge ? m->p : bkey_start_pos(m);
2372 bool could_pack = bkey_pack_pos((void *) &uk, new_pos, b);
2376 * @m is the new merged extent:
2378 * The merge took place in the last bset; we know there can't be any 0
2379 * size extents overlapping with m there because if so they would have
2380 * been between the two extents we merged.
2382 * But in the other bsets, we have to check for and fix such extents:
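*
* Example (illustrative, added): if [0, 8) and [8, 16) were just merged
* into m = [0, 16) in the last bset, an older bset might still contain a
* 0 size whiteout at, say, offset 12 - that key now falls inside m and has
* to be found and shifted out of the way here.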
2385 for_each_bset(b, t) {
2386 if (t == bset_tree_last(b))
2390 * if we don't find this bset in the iterator, we already got to
2391 * the end of that bset, so start searching from the end.
2393 k = bch_btree_node_iter_bset_pos(node_iter, b, t);
2395 if (k == btree_bkey_last(b, t))
2396 k = bkey_prev_all(b, t, k);
2402 * Back merge: 0 size extents will be before the key
2403 * that was just inserted (and thus the iterator
2404 * position) - walk backwards to find them
2408 (uk = bkey_unpack_key(b, k),
2409 bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
2410 k = bkey_prev_all(b, t, k)) {
2411 if (bkey_cmp(uk.p, m->p) >= 0)
2414 if (!extent_merge_one_overlapping(iter, new_pos,
2415 t, k, uk, check, could_pack))
2419 /* Front merge - walk forwards */
2421 k != btree_bkey_last(b, t) &&
2422 (uk = bkey_unpack_key(b, k),
2423 bkey_cmp(uk.p, m->p) < 0);
2426 bkey_start_pos(m)) <= 0)
2429 if (!extent_merge_one_overlapping(iter, new_pos,
2430 t, k, uk, check, could_pack))
2445 * When merging an extent that we're inserting into a btree node, the new merged
2446 * extent could overlap with an existing 0 size extent - if we don't fix that,
2447 * it'll break the btree node iterator so this code finds those 0 size extents
2448 * and shifts them out of the way.
2450 * Also unpacks and repacks.
2452 static bool bch_extent_merge_inline(struct cache_set *c,
2453 struct btree_iter *iter,
2454 struct bkey_packed *l,
2455 struct bkey_packed *r,
2458 struct btree *b = iter->nodes[0];
2459 struct btree_node_iter *node_iter = &iter->node_iters[0];
2460 const struct bkey_format *f = &b->format;
2461 struct bset_tree *t = bset_tree_last(b);
2462 struct bkey_packed *m;
2469 * We need to save copies of both l and r, because we might get a
2470 * partial merge (which modifies both) and then fail to repack
2472 bkey_unpack(b, &li.k, l);
2473 bkey_unpack(b, &ri.k, r);
2475 m = back_merge ? l : r;
2476 mi = back_merge ? &li.k : &ri.k;
2478 /* l & r should be in last bset: */
2479 EBUG_ON(bch_bkey_to_bset(b, m) != t);
2481 switch (bch_extent_merge(c, b, &li.k, &ri.k)) {
2482 case BCH_MERGE_NOMERGE:
2484 case BCH_MERGE_PARTIAL:
2485 if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &mi->k, f))
2488 if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
2491 extent_i_save(b, m, mi);
2492 bch_bset_fix_invalidated_key(b, t, m);
2495 * Update iterator to reflect what we just inserted - otherwise,
2496 * the iter_fix() call is going to put us _before_ the key we
2497 * just partially merged with:
2500 bch_btree_iter_set_pos_same_leaf(iter, li.k.k.p);
2502 bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
2503 t, m, m->u64s, m->u64s);
2506 bkey_copy(packed_to_bkey(l), &li.k);
2508 bkey_copy(packed_to_bkey(r), &ri.k);
2510 case BCH_MERGE_MERGE:
2511 if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &li.k.k, f))
2514 if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
2517 extent_i_save(b, m, &li.k);
2518 bch_bset_fix_invalidated_key(b, t, m);
2520 bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
2521 t, m, m->u64s, m->u64s);
2528 const struct bkey_ops bch_bkey_extent_ops = {
2529 .key_invalid = bch_extent_invalid,
2530 .key_debugcheck = bch_extent_debugcheck,
2531 .val_to_text = bch_extent_to_text,
2532 .swab = bch_ptr_swab,
2533 .key_normalize = bch_ptr_normalize,
2534 .key_merge = bch_extent_merge,