1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BKEY_H
3 #define _BCACHEFS_BKEY_H
6 #include "bcachefs_format.h"
8 #include "btree_types.h"
/*
 * NOTE(review): this excerpt is fragmentary -- the enum's closing brace and
 * several surrounding lines are elided.
 *
 * Flags passed to bkey validation; names suggest the context being
 * validated (write path / commit / journal replay) -- confirm in bkey.c.
 */
12 enum bkey_invalid_flags {
13 BKEY_INVALID_WRITE = (1U << 0),
14 BKEY_INVALID_COMMIT = (1U << 1),
15 BKEY_INVALID_JOURNAL = (1U << 2),
21 * compiled unpack functions are disabled, pending a new interface for
22 * dynamically allocating executable memory:
26 #define HAVE_BCACHEFS_COMPILED_UNPACK 1
/* Presumably prints a packed key's raw bit layout for debugging -- see bkey.c: */
30 void bch2_bkey_packed_to_binary_text(struct printbuf *,
31 const struct bkey_format *,
32 const struct bkey_packed *);
34 /* bkey with split value, const */
/* NOTE(review): the bkey_s_c/bkey_s struct definitions are mostly elided here. */
37 const struct bch_val *v;
40 /* bkey with split value */
/* Advance to the next packed key: keys are laid out contiguously by u64s. */
51 #define bkey_p_next(_k) vstruct_next(_k)
/* Same for unpacked keys: step over ->k.u64s 64-bit words from _data: */
53 static inline struct bkey_i *bkey_next(struct bkey_i *k)
55 return (struct bkey_i *) (k->_data + k->k.u64s);
/* Value size in u64s: total key u64s minus the fixed bkey header u64s. */
58 #define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s)
60 static inline size_t bkey_val_bytes(const struct bkey *k)
62 return bkey_val_u64s(k) * sizeof(u64);
/* Set total size from a value size; ->u64s is a u8, hence the bound check: */
65 static inline void set_bkey_val_u64s(struct bkey *k, unsigned val_u64s)
67 unsigned u64s = BKEY_U64s + val_u64s;
69 BUG_ON(u64s > U8_MAX);
/* Byte-granularity variant; rounds up to whole u64s: */
73 static inline void set_bkey_val_bytes(struct bkey *k, unsigned bytes)
75 set_bkey_val_u64s(k, DIV_ROUND_UP(bytes, sizeof(u64)));
/* One past the end of the value, for a split (k, v) pair: */
78 #define bkey_val_end(_k) ((void *) (((u64 *) (_k).v) + bkey_val_u64s((_k).k)))
/* Key-type predicates; whiteouts also count deleted keys: */
80 #define bkey_deleted(_k) ((_k)->type == KEY_TYPE_deleted)
82 #define bkey_whiteout(_k) \
83 ((_k)->type == KEY_TYPE_deleted || (_k)->type == KEY_TYPE_whiteout)
/* Combine two keys' 1-bit format fields into a 2-bit index (l = bit 0, r = bit 1): */
92 #define bkey_lr_packed(_l, _r) \
93 ((_l)->format + ((_r)->format << 1))
/*
 * bkey_copy(): copy a key (bkey_i or bkey_packed -- enforced by the
 * BUILD_BUG_ONs), asserting via EBUG_ON that _dst does not start inside
 * _src's u64 span, then copying ->u64s words with memcpy_u64s_small().
 * Both struct layouts start with the same header, so reading ->u64s
 * through a (struct bkey *) cast works for either type.
 */
95 #define bkey_copy(_dst, _src) \
97 BUILD_BUG_ON(!type_is(_dst, struct bkey_i *) && \
98 !type_is(_dst, struct bkey_packed *)); \
99 BUILD_BUG_ON(!type_is(_src, struct bkey_i *) && \
100 !type_is(_src, struct bkey_packed *)); \
101 EBUG_ON((u64 *) (_dst) > (u64 *) (_src) && \
102 (u64 *) (_dst) < (u64 *) (_src) + \
103 ((struct bkey *) (_src))->u64s); \
105 memcpy_u64s_small((_dst), (_src), \
106 ((struct bkey *) (_src))->u64s); \
/* Bit-level helpers on packed keys within a btree node: */
112 unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
113 const struct bkey_packed *,
114 const struct bkey_packed *);
116 unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);
/*
 * Three-way comparisons; the _format_checked variants presumably require
 * both keys already packed (per the naming -- verify in bkey.c):
 */
119 int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
120 const struct bkey_packed *,
121 const struct btree *);
124 int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
125 const struct bkey_packed *,
126 const struct bpos *);
129 int bch2_bkey_cmp_packed(const struct btree *,
130 const struct bkey_packed *,
131 const struct bkey_packed *);
134 int __bch2_bkey_cmp_left_packed(const struct btree *,
135 const struct bkey_packed *,
136 const struct bpos *);
/* Thin wrapper: compare a (possibly packed) key against an unpacked position: */
139 int bkey_cmp_left_packed(const struct btree *b,
140 const struct bkey_packed *l, const struct bpos *r)
142 return __bch2_bkey_cmp_left_packed(b, l, r);
146 * The compiler generates better code when we pass bpos by ref, but it's often
147 * enough terribly convenient to pass it by val... as much as I hate c++, const
148 * ref would be nice here:
151 static inline int bkey_cmp_left_packed_byval(const struct btree *b,
152 const struct bkey_packed *l,
/* NOTE(review): the "struct bpos r" parameter line is elided in this excerpt. */
155 return bkey_cmp_left_packed(b, l, &r);
/* Branchless equality: OR of XORs over all three position fields: */
158 static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
160 return !((l.inode ^ r.inode) |
161 (l.offset ^ r.offset) |
162 (l.snapshot ^ r.snapshot));
/* Lexicographic <: compare inode, then offset, then snapshot: */
165 static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
167 return l.inode != r.inode ? l.inode < r.inode :
168 l.offset != r.offset ? l.offset < r.offset :
169 l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
/* Same as bpos_lt() except equal positions return true: */
172 static __always_inline bool bpos_le(struct bpos l, struct bpos r)
174 return l.inode != r.inode ? l.inode < r.inode :
175 l.offset != r.offset ? l.offset < r.offset :
176 l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
/* > and >= by swapping the arguments: */
179 static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
181 return bpos_lt(r, l);
184 static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
186 return bpos_le(r, l);
/* Three-way compare, chained with the gcc ?: extension: */
189 static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
191 return cmp_int(l.inode, r.inode) ?:
192 cmp_int(l.offset, r.offset) ?:
193 cmp_int(l.snapshot, r.snapshot);
196 static inline struct bpos bpos_min(struct bpos l, struct bpos r)
198 return bpos_lt(l, r) ? l : r;
201 static inline struct bpos bpos_max(struct bpos l, struct bpos r)
203 return bpos_gt(l, r) ? l : r;
/*
 * bkey_* position comparisons: unlike the bpos_* family above, these
 * compare only inode and offset -- the snapshot field is ignored.
 */
206 static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
208 return !((l.inode ^ r.inode) |
209 (l.offset ^ r.offset));
212 static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
214 return l.inode != r.inode
216 : l.offset < r.offset;
219 static __always_inline bool bkey_le(struct bpos l, struct bpos r)
221 return l.inode != r.inode
223 : l.offset <= r.offset;
/* > and >= by swapping the arguments: */
226 static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
228 return bkey_lt(r, l);
231 static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
233 return bkey_le(r, l);
/* Three-way compare on (inode, offset) only: */
236 static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
238 return cmp_int(l.inode, r.inode) ?:
239 cmp_int(l.offset, r.offset);
242 static inline struct bpos bkey_min(struct bpos l, struct bpos r)
244 return bkey_lt(l, r) ? l : r;
247 static inline struct bpos bkey_max(struct bpos l, struct bpos r)
249 return bkey_gt(l, r) ? l : r;
/* "swab" = byte-swap, kernel convention -- endianness conversion helpers: */
252 void bch2_bpos_swab(struct bpos *);
253 void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);
/* Three-way version compare, high word first (low-word line elided here): */
255 static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
257 return cmp_int(l.hi, r.hi) ?:
261 #define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })
262 #define MAX_VERSION ((struct bversion) { .hi = ~0, .lo = ~0ULL })
/* True iff v equals ZERO_VERSION: */
264 static __always_inline int bversion_zero(struct bversion v)
266 return !bversion_cmp(v, ZERO_VERSION);
269 #ifdef CONFIG_BCACHEFS_DEBUG
270 /* statement expressions confusing unlikely()? */
271 #define bkey_packed(_k) \
272 ({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT); \
273 (_k)->format != KEY_FORMAT_CURRENT; })
/* (#else on elided line) non-debug variant, same test without the assert: */
275 #define bkey_packed(_k) ((_k)->format != KEY_FORMAT_CURRENT)
279 * It's safe to treat an unpacked bkey as a packed one, but not the reverse
281 static inline struct bkey_packed *bkey_to_packed(struct bkey_i *k)
283 return (struct bkey_packed *) k;
286 static inline const struct bkey_packed *bkey_to_packed_c(const struct bkey_i *k)
288 return (const struct bkey_packed *) k;
/* The reverse direction returns NULL when the key really is packed: */
291 static inline struct bkey_i *packed_to_bkey(struct bkey_packed *k)
293 return bkey_packed(k) ? NULL : (struct bkey_i *) k;
296 static inline const struct bkey *packed_to_bkey_c(const struct bkey_packed *k)
298 return bkey_packed(k) ? NULL : (const struct bkey *) k;
/* Total bits a format spends on the position fields (inode/offset/snapshot): */
301 static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
303 return format->bits_per_field[BKEY_FIELD_INODE] +
304 format->bits_per_field[BKEY_FIELD_OFFSET] +
305 format->bits_per_field[BKEY_FIELD_SNAPSHOT];
/*
 * Position successor/predecessor helpers; their bodies are elided in
 * this excerpt, so no behavior is documented here.
 */
308 static inline struct bpos bpos_successor(struct bpos p)
318 static inline struct bpos bpos_predecessor(struct bpos p)
328 static inline struct bpos bpos_nosnap_successor(struct bpos p)
339 static inline struct bpos bpos_nosnap_predecessor(struct bpos p)
/* A key's start offset: its position offset minus its size: */
350 static inline u64 bkey_start_offset(const struct bkey *k)
352 return k->p.offset - k->size;
/* Full start position: same inode/snapshot, offset moved back by size: */
355 static inline struct bpos bkey_start_pos(const struct bkey *k)
357 return (struct bpos) {
359 .offset = bkey_start_offset(k),
360 .snapshot = k->p.snapshot,
/*
 * Size of just the key part of a possibly-packed key: the format's packed
 * key size if packed, otherwise the fixed unpacked size:
 */
366 static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
367 const struct bkey_packed *k)
369 unsigned ret = bkey_packed(k) ? format->key_u64s : BKEY_U64s;
371 EBUG_ON(k->u64s < ret);
375 static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
376 const struct bkey_packed *k)
378 return bkeyp_key_u64s(format, k) * sizeof(u64);
/* Value size: whatever of ->u64s isn't the key part: */
381 static inline unsigned bkeyp_val_u64s(const struct bkey_format *format,
382 const struct bkey_packed *k)
384 return k->u64s - bkeyp_key_u64s(format, k);
387 static inline size_t bkeyp_val_bytes(const struct bkey_format *format,
388 const struct bkey_packed *k)
390 return bkeyp_val_u64s(format, k) * sizeof(u64);
/* Resize a possibly-packed key's value, preserving its key part: */
393 static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
394 struct bkey_packed *k, unsigned val_u64s)
396 k->u64s = bkeyp_key_u64s(format, k) + val_u64s;
/* The value starts immediately after the (format-dependent) key part: */
399 #define bkeyp_val(_format, _k) \
400 ((struct bch_val *) ((_k)->_data + bkeyp_key_u64s(_format, _k)))
402 extern const struct bkey_format bch2_bkey_format_current;
/* Repack a key from one format to another; presumably false on failure -- see bkey.c: */
404 bool bch2_bkey_transform(const struct bkey_format *,
405 struct bkey_packed *,
406 const struct bkey_format *,
407 const struct bkey_packed *);
409 struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
410 const struct bkey_packed *);
/* Fallback pos-unpack, only needed when compiled unpack isn't available: */
412 #ifndef HAVE_BCACHEFS_COMPILED_UNPACK
413 struct bpos __bkey_unpack_pos(const struct bkey_format *,
414 const struct bkey_packed *);
417 bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
418 const struct bkey_format *);
/* Result of packing a position that may not be exactly representable: */
420 enum bkey_pack_pos_ret {
422 BKEY_PACK_POS_SMALLER,
426 enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
427 const struct btree *);
/* True only when the position packed exactly (no lossy rounding): */
429 static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
430 const struct btree *b)
432 return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
/* Signature of the per-btree JIT-compiled unpack function stored in b->aux_data: */
440 typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);
/*
 * Unpack src (which must be packed) into *dst. Uses the compiled unpack
 * function when available, cross-checking it against the slow path under
 * expensive debug checks. NOTE(review): several lines (the call through
 * unpack_fn, braces, the else) are elided in this excerpt.
 */
443 __bkey_unpack_key_format_checked(const struct btree *b,
445 const struct bkey_packed *src)
447 if (IS_ENABLED(HAVE_BCACHEFS_COMPILED_UNPACK)) {
448 compiled_unpack_fn unpack_fn = b->aux_data;
451 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
452 bch2_expensive_debug_checks) {
453 struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);
455 BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
458 *dst = __bch2_bkey_unpack_key(&b->format, src);
/* By-value wrapper around the above: */
462 static inline struct bkey
463 bkey_unpack_key_format_checked(const struct btree *b,
464 const struct bkey_packed *src)
468 __bkey_unpack_key_format_checked(b, &dst, src);
/* Unpack into *dst; plain struct copy when src is already unpacked: */
472 static inline void __bkey_unpack_key(const struct btree *b,
474 const struct bkey_packed *src)
476 if (likely(bkey_packed(src)))
477 __bkey_unpack_key_format_checked(b, dst, src);
479 *dst = *packed_to_bkey_c(src);
483 * bkey_unpack_key -- unpack just the key, not the value
485 static inline struct bkey bkey_unpack_key(const struct btree *b,
486 const struct bkey_packed *src)
488 return likely(bkey_packed(src))
489 ? bkey_unpack_key_format_checked(b, src)
490 : *packed_to_bkey_c(src);
/*
 * Unpack only the position: with compiled unpack this reuses the full key
 * unpack; otherwise falls back to __bkey_unpack_pos() (the #else/#endif
 * lines are elided in this excerpt).
 */
493 static inline struct bpos
494 bkey_unpack_pos_format_checked(const struct btree *b,
495 const struct bkey_packed *src)
497 #ifdef HAVE_BCACHEFS_COMPILED_UNPACK
498 return bkey_unpack_key_format_checked(b, src).p;
500 return __bkey_unpack_pos(&b->format, src);
/* Position of a possibly-packed key; trivial member read when unpacked: */
504 static inline struct bpos bkey_unpack_pos(const struct btree *b,
505 const struct bkey_packed *src)
507 return likely(bkey_packed(src))
508 ? bkey_unpack_pos_format_checked(b, src)
509 : packed_to_bkey_c(src)->p;
512 /* Disassembled bkeys */
/* Unpack k's key into caller-provided *u, pairing it with k's in-place value: */
514 static inline struct bkey_s_c bkey_disassemble(const struct btree *b,
515 const struct bkey_packed *k,
518 __bkey_unpack_key(b, u, k);
520 return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
523 /* non const version: */
/* Same as bkey_disassemble() but yields a mutable bkey_s: */
524 static inline struct bkey_s __bkey_disassemble(const struct btree *b,
525 struct bkey_packed *k,
528 __bkey_unpack_key(b, u, k);
530 return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
/*
 * Largest value representable for packed field @nr: the field's base
 * offset plus a mask of bits_per_field one-bits. The < 64 guard avoids
 * undefined behavior from a 64-bit shift (the full-width branch is on an
 * elided line).
 */
533 static inline u64 bkey_field_max(const struct bkey_format *f,
534 enum bch_bkey_fields nr)
536 return f->bits_per_field[nr] < 64
537 ? (le64_to_cpu(f->field_offset[nr]) +
538 ~(~0ULL << f->bits_per_field[nr]))
/* JIT-compile an unpack function for a format; no-op stub otherwise: */
542 #ifdef HAVE_BCACHEFS_COMPILED_UNPACK
544 int bch2_compile_bkey_format(const struct bkey_format *, void *);
548 static inline int bch2_compile_bkey_format(const struct bkey_format *format,
549 void *out) { return 0; }
/*
 * Rebuild a contiguous bkey_i from a disassembled (split) key: the key
 * copy line is elided here; the value is copied word-by-word below.
 */
553 static inline void bkey_reassemble(struct bkey_i *dst,
557 memcpy_u64s_small(&dst->v, src.v, bkey_val_u64s(src.k));
/* Sentinel / error-carrying split-key values: */
560 #define bkey_s_null ((struct bkey_s) { .k = NULL })
561 #define bkey_s_c_null ((struct bkey_s_c) { .k = NULL })
563 #define bkey_s_err(err) ((struct bkey_s) { .k = ERR_PTR(err) })
564 #define bkey_s_c_err(err) ((struct bkey_s_c) { .k = ERR_PTR(err) })
/* Wrap a bare key (no value) as a split key: */
566 static inline struct bkey_s bkey_to_s(struct bkey *k)
568 return (struct bkey_s) { .k = k, .v = NULL };
571 static inline struct bkey_s_c bkey_to_s_c(const struct bkey *k)
573 return (struct bkey_s_c) { .k = k, .v = NULL };
/* Split a contiguous bkey_i into its key and value pointers: */
576 static inline struct bkey_s bkey_i_to_s(struct bkey_i *k)
578 return (struct bkey_s) { .k = &k->k, .v = &k->v };
581 static inline struct bkey_s_c bkey_i_to_s_c(const struct bkey_i *k)
583 return (struct bkey_s_c) { .k = &k->k, .v = &k->v };
587 * For a given type of value (e.g. struct bch_extent), generates the types for
588 * bkey + bch_extent - inline, split, split const - and also all the conversion
589 * functions, which also check that the value is of the correct type.
591 * We use anonymous unions for upcasting - e.g. converting from e.g. a
592 * bkey_i_extent to a bkey_i - since that's always safe, instead of conversion
/*
 * Per-type code generator, invoked once per key type via an X-macro list.
 * Downcasts (bkey_i_to_##name etc.) EBUG_ON a type mismatch; upcasts go
 * through the anonymous-union members. The *_init() helper zeroes the
 * value, sets the type tag, and sizes the key for sizeof(the value).
 * NOTE(review): many interior lines (union members, braces, initializers)
 * are elided in this excerpt.
 */
595 #define x(name, ...) \
596 struct bkey_i_##name { \
601 struct bch_##name v; \
604 struct bkey_s_c_##name { \
607 const struct bkey *k; \
608 const struct bch_##name *v; \
610 struct bkey_s_c s_c; \
614 struct bkey_s_##name { \
618 struct bch_##name *v; \
620 struct bkey_s_c_##name c; \
622 struct bkey_s_c s_c; \
626 static inline struct bkey_i_##name *bkey_i_to_##name(struct bkey_i *k) \
628 EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
629 return container_of(&k->k, struct bkey_i_##name, k); \
632 static inline const struct bkey_i_##name * \
633 bkey_i_to_##name##_c(const struct bkey_i *k) \
635 EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
636 return container_of(&k->k, struct bkey_i_##name, k); \
639 static inline struct bkey_s_##name bkey_s_to_##name(struct bkey_s k) \
641 EBUG_ON(!IS_ERR_OR_NULL(k.k) && k.k->type != KEY_TYPE_##name); \
642 return (struct bkey_s_##name) { \
644 .v = container_of(k.v, struct bch_##name, v), \
648 static inline struct bkey_s_c_##name bkey_s_c_to_##name(struct bkey_s_c k)\
650 EBUG_ON(!IS_ERR_OR_NULL(k.k) && k.k->type != KEY_TYPE_##name); \
651 return (struct bkey_s_c_##name) { \
653 .v = container_of(k.v, struct bch_##name, v), \
657 static inline struct bkey_s_##name name##_i_to_s(struct bkey_i_##name *k)\
659 return (struct bkey_s_##name) { \
665 static inline struct bkey_s_c_##name \
666 name##_i_to_s_c(const struct bkey_i_##name *k) \
668 return (struct bkey_s_c_##name) { \
674 static inline struct bkey_s_##name bkey_i_to_s_##name(struct bkey_i *k) \
676 EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
677 return (struct bkey_s_##name) { \
679 .v = container_of(&k->v, struct bch_##name, v), \
683 static inline struct bkey_s_c_##name \
684 bkey_i_to_s_c_##name(const struct bkey_i *k) \
686 EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
687 return (struct bkey_s_c_##name) { \
689 .v = container_of(&k->v, struct bch_##name, v), \
693 static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\
695 struct bkey_i_##name *k = \
696 container_of(&_k->k, struct bkey_i_##name, k); \
699 memset(&k->v, 0, sizeof(k->v)); \
700 k->k.type = KEY_TYPE_##name; \
701 set_bkey_val_bytes(&k->k, sizeof(k->v)); \
709 /* byte order helpers */
711 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* Little endian: the most-significant word of a packed key is the last u64: */
713 static inline unsigned high_word_offset(const struct bkey_format *f)
715 return f->key_u64s - 1;
718 #define high_bit_offset 0
719 #define nth_word(p, n) ((p) - (n))
721 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/* Big-endian variant; high_word_offset's body is on an elided line: */
723 static inline unsigned high_word_offset(const struct bkey_format *f)
728 #define high_bit_offset KEY_PACKED_BITS_START
729 #define nth_word(p, n) ((p) + (n))
732 #error edit for your odd byteorder.
/* Walk words from most- to least-significant regardless of endianness: */
735 #define high_word(f, k) ((k)->_data + high_word_offset(f))
736 #define next_word(p) nth_word(p, 1)
737 #define prev_word(p) nth_word(p, -1)
739 #ifdef CONFIG_BCACHEFS_DEBUG
740 void bch2_bkey_pack_test(void);
/* (#else on an elided line) no-op stub when debug checks are disabled: */
742 static inline void bch2_bkey_pack_test(void) {}
/*
 * X-macro list pairing each packed-field enum constant with the struct
 * bkey member it is sourced from; callers define x() then invoke this.
 */
745 #define bkey_fields() \
746 x(BKEY_FIELD_INODE, p.inode) \
747 x(BKEY_FIELD_OFFSET, p.offset) \
748 x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
749 x(BKEY_FIELD_SIZE, size) \
750 x(BKEY_FIELD_VERSION_HI, version.hi) \
751 x(BKEY_FIELD_VERSION_LO, version.lo)
/* Running per-field min/max accumulated while choosing a packed format: */
753 struct bkey_format_state {
754 u64 field_min[BKEY_NR_FIELDS];
755 u64 field_max[BKEY_NR_FIELDS];
758 void bch2_bkey_format_init(struct bkey_format_state *);
/* Widen field's running [min, max] range to include v: */
760 static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
762 s->field_min[field] = min(s->field_min[field], v);
763 s->field_max[field] = max(s->field_max[field], v);
767 * Changes @format so that @k can be successfully packed with @format
769 static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
771 #define x(id, field) __bkey_format_add(s, id, k->field);
/* Format construction / validation / introspection entry points: */
776 void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
777 struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
778 int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
779 enum bkey_invalid_flags, struct printbuf *);
780 void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);
782 #endif /* _BCACHEFS_BKEY_H */