/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_trans;
enum bkey_invalid_flags;

/* extent entries: */

#define extent_entry_last(_e) \
	((typeof(&(_e).v->start[0])) bkey_val_end(_e))

#define entry_to_ptr(_entry) \
({ \
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry)); \
	__builtin_choose_expr( \
		type_is_exact(_entry, const union bch_extent_entry *), \
		(const struct bch_extent_ptr *) (_entry), \
		(struct bch_extent_ptr *) (_entry)); \
})

/* downcast, preserves const */
#define to_entry(_entry) \
({ \
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) && \
		     !type_is(_entry, struct bch_extent_ptr *) && \
		     !type_is(_entry, struct bch_extent_stripe_ptr *)); \
	__builtin_choose_expr( \
		(type_is_exact(_entry, const union bch_extent_crc *) || \
		 type_is_exact(_entry, const struct bch_extent_ptr *) || \
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)), \
		(const union bch_extent_entry *) (_entry), \
		(union bch_extent_entry *) (_entry)); \
})

#define extent_entry_next(_entry) \
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}

static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n) \
	case BCH_EXTENT_ENTRY_##f: \
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
	return extent_entry_bytes(entry) / sizeof(u64);
}

static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
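
/*
 * Example (illustrative sketch, not a definitive usage): __extent_entry_insert()
 * opens a gap at @dst by shifting the tail of the value up by the new entry's
 * size, then copies the new entry into the gap, so inserting a stripe_ptr in
 * front of an existing ptr turns [crc32][ptr] into [crc32][stripe_ptr][ptr]:
 *
 *	union bch_extent_entry new;
 *
 *	memset(&new, 0, sizeof(new));
 *	new.stripe_ptr.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
 *	__extent_entry_insert(k, dst, &new);	// k, dst: caller-provided
 */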

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
	return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
}

static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
{
	return extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
	switch (extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_crc32:
	case BCH_EXTENT_ENTRY_crc64:
	case BCH_EXTENT_ENTRY_crc128:
		return true;
	default:
		return false;
	}
}

union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};

#define __entry_to_crc(_entry) \
	__builtin_choose_expr( \
		type_is_exact(_entry, const union bch_extent_entry *), \
		(const union bch_extent_crc *) (_entry), \
		(union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry) \
({ \
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry)); \
	__entry_to_crc(_entry); \
})

static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc) \
		.csum_type		= _crc.csum_type, \
		.compression_type	= _crc.compression_type, \
		.compressed_size	= _crc._compressed_size + 1, \
		.uncompressed_size	= _crc._uncompressed_size + 1, \
		.offset			= _crc.offset, \
		.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		memcpy(&ret.csum.lo, &crc->crc32.csum, sizeof(crc->crc32.csum));
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce		= crc->crc64.nonce,
			.csum.lo	= (__force __le64) crc->crc64.csum_lo,
		};

		u16 hi = crc->crc64.csum_hi;

		memcpy(&ret.csum.hi, &hi, sizeof(hi));
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce		= crc->crc128.nonce,
			.csum		= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}

static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
{
	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}
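
/*
 * Example (illustrative sketch): unpacking the crc entry that governs a
 * pointer gives the sizes in sectors.  For a 128 sector extent compressed
 * down to 32 sectors on disk, one would expect roughly:
 *
 *	struct bch_extent_crc_unpacked crc =
 *		bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 *
 *	// crc.compressed_size   == 32       (sectors actually stored on disk)
 *	// crc.uncompressed_size == 128      (sectors covered by the checksum)
 *	// crc.live_size         == k.k->size (sectors still referenced by this key)
 *	// crc_is_compressed(crc) is true
 */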

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};

static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	case KEY_TYPE_reflink_v: {
		struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

		return (struct bkey_ptrs_c) {
			r.v->start,
			bkey_val_end(r),
		};
	}
	case KEY_TYPE_btree_ptr_v2: {
		struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}

static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

	return (struct bkey_ptrs) {
		(void *) p.start,
		(void *) p.end
	};
}

#define __bkey_extent_entry_for_each_from(_start, _end, _entry) \
	for ((_entry) = (_start); \
	     (_entry) < (_end); \
	     (_entry) = extent_entry_next(_entry))

#define __bkey_ptr_next(_ptr, _end) \
({ \
	typeof(_end) _entry; \
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry) \
		if (extent_entry_is_ptr(_entry)) \
			break; \
	_entry < (_end) ? entry_to_ptr(_entry) : NULL; \
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start) \
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry) \
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr) \
	for ((_ptr) = (_start); \
	     ((_ptr) = __bkey_ptr_next(_ptr, _end)); \
	     (_ptr)++)

#define bkey_ptr_next(_p, _ptr) \
	__bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr) \
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)
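
/*
 * Example (illustrative sketch): counting how many of a key's pointers live
 * on a given device (@dev is an arbitrary example index):
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const struct bch_extent_ptr *ptr;
 *	unsigned nr = 0;
 *
 *	bkey_for_each_ptr(ptrs, ptr)
 *		if (ptr->dev == dev)
 *			nr++;
 */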

#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry) \
({ \
	__label__ out; \
	(_ptr).idx	= 0; \
	(_ptr).has_ec	= false; \
	__bkey_extent_entry_for_each_from(_entry, _end, _entry) \
		switch (extent_entry_type(_entry)) { \
		case BCH_EXTENT_ENTRY_ptr: \
			(_ptr).ptr = _entry->ptr; \
			goto out; \
		case BCH_EXTENT_ENTRY_crc32: \
		case BCH_EXTENT_ENTRY_crc64: \
		case BCH_EXTENT_ENTRY_crc128: \
			(_ptr).crc = bch2_extent_crc_unpack(_k, \
					entry_to_crc(_entry)); \
			break; \
		case BCH_EXTENT_ENTRY_stripe_ptr: \
			(_ptr).ec = _entry->stripe_ptr; \
			(_ptr).has_ec	= true; \
			break; \
		default: \
			/* nothing */ \
			break; \
		} \
out: \
	_entry < (_end); \
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry) \
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL), \
	     (_entry) = _start; \
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry); \
	     (_entry) = extent_entry_next(_entry))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry) \
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end, \
				   _ptr, _entry)
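
/*
 * Example (illustrative sketch): walking a key's pointers together with the
 * checksum/compression and erasure coding state that applies to each one:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct extent_ptr_decoded p;
 *
 *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 *		if (p.crc.csum_type)
 *			;	// p.ptr is covered by the checksum in p.crc
 *		if (p.has_ec)
 *			;	// p.ptr belongs to the stripe described by p.ec
 *	}
 */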

#define bkey_crc_next(_k, _start, _end, _crc, _iter) \
({ \
	__bkey_extent_entry_for_each_from(_iter, _end, _iter) \
		if (extent_entry_is_crc(_iter)) { \
			(_crc) = bch2_extent_crc_unpack(_k, \
						entry_to_crc(_iter)); \
			break; \
		} \
	(_iter) < (_end); \
})

#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter) \
	for ((_crc) = bch2_extent_crc_unpack(_k, NULL), \
	     (_iter) = (_start); \
	     bkey_crc_next(_k, _start, _end, _crc, _iter); \
	     (_iter) = extent_entry_next(_iter))

#define bkey_for_each_crc(_k, _p, _crc, _iter) \
	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)

/* Iterate over pointers in KEY_TYPE_extent: */

#define extent_for_each_entry_from(_e, _entry, _start) \
	__bkey_extent_entry_for_each_from(_start, \
				extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry) \
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr) \
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr) \
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start, \
				   extent_entry_last(_e), _ptr, _entry)

/* utility code common to all keys with pointers: */

void bch2_mark_io_failure(struct bch_io_failures *,
			  struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
			       struct bch_io_failures *,
			       struct extent_ptr_decoded *);

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c,
			   enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);

int bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c,
			      enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
			      int, struct bkey_s);

#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) { \
	.key_invalid	= bch2_btree_ptr_invalid, \
	.val_to_text	= bch2_btree_ptr_to_text, \
	.swab		= bch2_ptr_swab, \
	.trans_trigger	= bch2_trans_mark_extent, \
	.atomic_trigger	= bch2_mark_extent, \
})

#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) { \
	.key_invalid	= bch2_btree_ptr_v2_invalid, \
	.val_to_text	= bch2_btree_ptr_v2_to_text, \
	.swab		= bch2_ptr_swab, \
	.compat		= bch2_btree_ptr_v2_compat, \
	.trans_trigger	= bch2_trans_mark_extent, \
	.atomic_trigger	= bch2_mark_extent, \
	.min_val_size	= 40, \
})

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_extent ((struct bkey_ops) { \
	.key_invalid	= bch2_bkey_ptrs_invalid, \
	.val_to_text	= bch2_bkey_ptrs_to_text, \
	.swab		= bch2_ptr_swab, \
	.key_normalize	= bch2_extent_normalize, \
	.key_merge	= bch2_extent_merge, \
	.trans_trigger	= bch2_trans_mark_extent, \
	.atomic_trigger	= bch2_mark_extent, \
})

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_reservation ((struct bkey_ops) { \
	.key_invalid	= bch2_reservation_invalid, \
	.val_to_text	= bch2_reservation_to_text, \
	.key_merge	= bch2_reservation_merge, \
	.trans_trigger	= bch2_trans_mark_reservation, \
	.atomic_trigger	= bch2_mark_reservation, \
	.min_val_size	= 8, \
})

/* Extent checksum entries: */

bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
				 struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
			    struct bch_extent_crc_unpacked);

/* Generic code for keys with pointers: */

static inline bool bkey_is_btree_ptr(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_inline_data(const struct bkey *k)
{
	return	k->type == KEY_TYPE_inline_data ||
		k->type == KEY_TYPE_indirect_inline_data;
}

static inline unsigned bkey_inline_data_offset(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_inline_data:
		return sizeof(struct bch_inline_data);
	case KEY_TYPE_indirect_inline_data:
		return sizeof(struct bch_indirect_inline_data);
	default:
		BUG();
	}
}

static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
	return bkey_val_bytes(k) - bkey_inline_data_offset(k);
}

#define bkey_inline_data_p(_k)	(((void *) (_k).v) + bkey_inline_data_offset((_k).k))
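
/*
 * Example (illustrative sketch): for an inline data key the payload lives
 * directly in the bkey value, after a small header (@buf and @buflen are
 * hypothetical caller-provided variables):
 *
 *	if (bkey_extent_is_inline_data(k.k)) {
 *		unsigned bytes = min_t(unsigned, buflen, bkey_inline_data_bytes(k.k));
 *
 *		memcpy(buf, bkey_inline_data_p(k), bytes);
 *	}
 */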

static inline bool bkey_extent_is_data(const struct bkey *k)
{
	return	bkey_extent_is_direct_data(k) ||
		bkey_extent_is_inline_data(k) ||
		k->type == KEY_TYPE_reflink_p;
}

/*
 * Should extent be counted under inode->i_sectors?
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reservation:
	case KEY_TYPE_reflink_p:
	case KEY_TYPE_reflink_v:
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->unwritten)
			return true;
	return false;
}

static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation ||
		bkey_extent_is_unwritten(k);
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		if (!ptr->cached)
			ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		if (ptr->cached)
			ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return BCH_DATA_user;
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);

unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

void bch2_bkey_drop_device(struct bkey_s, unsigned);
void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);

static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
{
	return (void *) bch2_bkey_has_device_c(k.s_c, dev);
}

bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);

void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);

static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}

void bch2_extent_ptr_decoded_append(struct bkey_i *,
				    struct extent_ptr_decoded *);
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
						   struct bch_extent_ptr *);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
					   struct bch_extent_ptr *);

#define bch2_bkey_drop_ptrs(_k, _ptr, _cond) \
do { \
	struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k); \
	_ptr = &_ptrs.start->ptr; \
	while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) { \
		if (_cond) { \
			_ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr); \
			_ptrs = bch2_bkey_ptrs(_k); \
			continue; \
		} \
		(_ptr)++; \
	} \
} while (0)
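
/*
 * Example (illustrative sketch): dropping every cached pointer a key has on a
 * particular device (@dev is an arbitrary example index):
 *
 *	struct bch_extent_ptr *ptr;
 *
 *	bch2_bkey_drop_ptrs(bkey_i_to_s(k), ptr,
 *			    ptr->cached && ptr->dev == dev);
 */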

bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
			   struct bch_extent_ptr, u64);
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);

void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);

bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c,
			   unsigned, struct printbuf *);

void bch2_ptr_swab(struct bkey_s);

/* Generic extent code: */

enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							   const struct bkey *m)
{
	int cmp1 = bkey_lt(k->p, m->p);
	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));

	return (cmp1 << 1) + cmp2;
}
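
/*
 * Illustrative breakdown of the encoding above: cmp1 is "k ends before m
 * ends", cmp2 is "k starts after m starts", so with m fixed:
 *
 *	m:    |--------|
 *	k:  |------------|	cmp1=0 cmp2=0 -> BCH_EXTENT_OVERLAP_ALL    (k covers all of m)
 *	k:        |------|	cmp1=0 cmp2=1 -> BCH_EXTENT_OVERLAP_BACK
 *	k:  |------|		cmp1=1 cmp2=0 -> BCH_EXTENT_OVERLAP_FRONT
 *	k:      |--|		cmp1=1 cmp2=1 -> BCH_EXTENT_OVERLAP_MIDDLE
 */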

int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);

static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	bch2_cut_front_s(where, bkey_i_to_s(k));
}

static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
{
	bch2_cut_back_s(where, bkey_i_to_s(k));
}

/**
 * bch2_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) will be preserved, modifies where the extent ends
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}
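
/*
 * Example (illustrative sketch): k->p.offset is the *end* of the extent, so
 * resizing keeps the start fixed.  An extent starting at sector 100 with
 * size 20 has p.offset == 120; after bch2_key_resize(k, 8) it has
 * p.offset == 108 and size == 8, and bkey_start_offset(k) is still 100.
 */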

/*
 * In extent_sort_fix_overlapping(), insert_fixup_extent(),
 * extent_merge_inline() - we're modifying keys in place that are packed. To do
 * that we have to unpack the key, modify the unpacked key - then this
 * copies/repacks the unpacked to the original as necessary.
 */
static inline void extent_save(struct btree *b, struct bkey_packed *dst,
			       struct bkey *src)
{
	struct bkey_format *f = &b->format;
	struct bkey_i *dst_unpacked;

	if ((dst_unpacked = packed_to_bkey(dst)))
		dst_unpacked->k = *src;
	else
		BUG_ON(!bch2_bkey_pack_key(dst, src, f));
}

#endif /* _BCACHEFS_EXTENTS_H */