1 // SPDX-License-Identifier: GPL-2.0
5 #include "bkey_methods.h"
/* EBUG_ON: "expensive" debug assertion; in this configuration it is a hard BUG_ON. */
12 #define EBUG_ON(cond) BUG_ON(cond)
/* The native, unpacked in-memory bkey field layout. */
17 const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
/* Forward declaration - defined later in this file. */
19 struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
20 const struct bkey_packed *);
/*
 * bch2_to_binary - render the first @nr_bits bits of @p into @out as ASCII
 * '0'/'1', most significant bit first, with a separator every 8 bits.
 * NOTE(review): parts of the loop body are not visible in this view.
 */
22 void bch2_to_binary(char *out, const u64 *p, unsigned nr_bits)
24 unsigned bit = high_bit_offset, done = 0;
/* Insert a separator between every group of 8 emitted bits. */
28 if (done && !(done % 8))
/* Emit the current bit of the current 64-bit word, high bit first. */
30 *out++ = *p & (1ULL << (63 - bit)) ? '1' : '0';
33 if (done == nr_bits) {
44 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * Debug self-check: re-unpacking @packed with @format must reproduce
 * @unpacked exactly; on mismatch, panic with both keys printed in text
 * and raw binary form.
 */
46 static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
47 const struct bkey *unpacked,
48 const struct bkey_format *format)
/* Packed and unpacked keys must agree on the value size. */
52 BUG_ON(bkeyp_val_u64s(format, packed) !=
53 bkey_val_u64s(unpacked));
55 BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
57 tmp = __bch2_bkey_unpack_key(format, packed);
59 if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
60 char buf1[160], buf2[160];
61 char buf3[160], buf4[160];
63 bch2_bkey_to_text(&PBUF(buf1), unpacked);
64 bch2_bkey_to_text(&PBUF(buf2), &tmp);
/* Dump the raw key bits of both representations for comparison. */
65 bch2_to_binary(buf3, (void *) unpacked, 80);
66 bch2_to_binary(buf4, high_word(format, packed), 80);
68 panic("keys differ: format u64s %u fields %u %u %u %u %u\n%s\n%s\n%s\n%s\n",
70 format->bits_per_field[0],
71 format->bits_per_field[1],
72 format->bits_per_field[2],
73 format->bits_per_field[3],
74 format->bits_per_field[4],
75 buf1, buf2, buf3, buf4);
/* Non-debug builds: verification compiles away to nothing. */
80 static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed,
81 const struct bkey *unpacked,
82 const struct bkey_format *format) {}
/* NOTE(review): the opening "struct pack_state {" line is not visible in
 * this view; these are its members - streaming bit-writer state used when
 * packing key fields into a bkey_packed. */
86 const struct bkey_format *format;
87 unsigned bits; /* bits remaining in current word */
88 u64 w; /* current word */
89 u64 *p; /* pointer to next word */
/* Start a pack_state writing at the high (most significant) word of @k. */
93 static struct pack_state pack_state_init(const struct bkey_format *format,
94 struct bkey_packed *k)
96 u64 *p = high_word(format, k);
98 return (struct pack_state) {
/* The first word reserves high_bit_offset bits for the bkey header. */
100 .bits = 64 - high_bit_offset,
/* Flush the in-progress word of @state back into @k's storage. */
107 static void pack_state_finish(struct pack_state *state,
108 struct bkey_packed *k)
/* The write pointer must still lie within the packed key's data area. */
110 EBUG_ON(state->p < k->_data);
111 EBUG_ON(state->p >= k->_data + state->format->key_u64s);
113 *state->p = state->w;
/* Streaming bit-reader state for unpacking key fields; mirror of pack_state. */
116 struct unpack_state {
117 const struct bkey_format *format;
118 unsigned bits; /* bits remaining in current word */
119 u64 w; /* current word */
120 const u64 *p; /* pointer to next word */
/* Start an unpack_state reading at the high (most significant) word of @k. */
124 static struct unpack_state unpack_state_init(const struct bkey_format *format,
125 const struct bkey_packed *k)
127 const u64 *p = high_word(format, k);
129 return (struct unpack_state) {
/* The first word reserves high_bit_offset bits for the bkey header, */
131 .bits = 64 - high_bit_offset,
/* so shift the header bits out of the initial word. */
132 .w = *p << high_bit_offset,
/*
 * Read the next field from the packed bit stream. Fields are stored biased
 * down by the format's per-field offset; the offset is read here so it can
 * be added back (NOTE(review): the add-back is not visible in this view).
 */
138 static u64 get_inc_field(struct unpack_state *state, unsigned field)
140 unsigned bits = state->format->bits_per_field[field];
141 u64 v = 0, offset = le64_to_cpu(state->format->field_offset[field]);
/* Field straddles the current word: take the bits remaining here... */
143 if (bits >= state->bits) {
144 v = state->w >> (64 - bits);
/* ...then continue reading from the next (lower-order) word. */
147 state->p = next_word(state->p);
148 state->w = *state->p;
152 /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
153 v |= (state->w >> 1) >> (63 - bits);
/*
 * Write field value @v into the packed bit stream, biased down by the
 * format's field_offset. Returns bool - presumably false when @v cannot be
 * represented in the field's bits; NOTE(review): the range checks and
 * return statements are not visible in this view.
 */
161 static bool set_inc_field(struct pack_state *state, unsigned field, u64 v)
163 unsigned bits = state->format->bits_per_field[field];
164 u64 offset = le64_to_cpu(state->format->field_offset[field]);
/* Field spans the word boundary: write the high part into this word. */
174 if (bits > state->bits) {
176 /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
177 state->w |= (v >> 1) >> (bits - 1);
/* Flush the completed word and advance to the next one. */
179 *state->p = state->w;
180 state->p = next_word(state->p);
/* Remaining (or whole) field fits in the current word. */
186 state->w |= v << state->bits;
/* Repack just the key bits of @in (format @in_f) into @out (format @out_f).
 * Returns false if some field does not fit in the output format. */
192 * Note: does NOT set out->format (we don't know what it should be here!)
194 * Also: doesn't work on extents - it doesn't preserve the invariant that
195 * if k is packed bkey_start_pos(k) will successfully pack
197 static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
198 struct bkey_packed *out,
199 const struct bkey_format *in_f,
200 const struct bkey_packed *in)
202 struct pack_state out_s = pack_state_init(out_f, out);
203 struct unpack_state in_s = unpack_state_init(in_f, in);
/* Stream every field from the input format into the output format. */
208 for (i = 0; i < BKEY_NR_FIELDS; i++)
209 if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
212 /* Can't happen because the val would be too big to unpack: */
213 EBUG_ON(in->u64s - in_f->key_u64s + out_f->key_u64s > U8_MAX);
215 pack_state_finish(&out_s, out);
/* New total size: same value size, new key size. */
216 out->u64s = out_f->key_u64s + in->u64s - in_f->key_u64s;
217 out->needs_whiteout = in->needs_whiteout;
218 out->type = in->type;
/* Repack @in from format @in_f to @out_f, copying the value unchanged. */
223 bool bch2_bkey_transform(const struct bkey_format *out_f,
224 struct bkey_packed *out,
225 const struct bkey_format *in_f,
226 const struct bkey_packed *in)
228 if (!bch2_bkey_transform_key(out_f, out, in_f, in))
/* The value follows the key verbatim; copy it after the repacked key. */
231 memcpy_u64s((u64 *) out + out_f->key_u64s,
232 (u64 *) in + in_f->key_u64s,
233 (in->u64s - in_f->key_u64s));
/* X-macro: every packed bkey field paired with its struct bkey member.
 * Users define x(id, field) then invoke bkey_fields(). */
237 #define bkey_fields() \
238 x(BKEY_FIELD_INODE, p.inode) \
239 x(BKEY_FIELD_OFFSET, p.offset) \
240 x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
241 x(BKEY_FIELD_SIZE, size) \
242 x(BKEY_FIELD_VERSION_HI, version.hi) \
243 x(BKEY_FIELD_VERSION_LO, version.lo)
/*
 * __bch2_bkey_unpack_key - decode a packed key into native struct bkey
 * form. Decodes only the key fields, not the value.
 */
245 struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format,
246 const struct bkey_packed *in)
248 struct unpack_state state = unpack_state_init(format, in);
251 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
252 EBUG_ON(in->u64s < format->key_u64s);
253 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
/* Resulting size must still fit in the 8-bit u64s header field. */
254 EBUG_ON(in->u64s - format->key_u64s + BKEY_U64s > U8_MAX);
/* Unpacked header: same value size, native (BKEY_U64s) key size. */
256 out.u64s = BKEY_U64s + in->u64s - format->key_u64s;
257 out.format = KEY_FORMAT_CURRENT;
258 out.needs_whiteout = in->needs_whiteout;
/* Decode every key field via the bkey_fields() x-macro. */
262 #define x(id, field) out.field = get_inc_field(&state, id);
269 #ifndef HAVE_BCACHEFS_COMPILED_UNPACK
/*
 * Fallback (non-JIT) position-only unpack: decodes just the bpos fields
 * (inode/offset/snapshot), skipping size and version. Only built when the
 * compiled (JIT) unpacker is unavailable.
 */
270 struct bpos __bkey_unpack_pos(const struct bkey_format *format,
271 const struct bkey_packed *in)
273 struct unpack_state state = unpack_state_init(format, in);
276 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
277 EBUG_ON(in->u64s < format->key_u64s);
278 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
/* The three bpos fields are the first three packed fields, in order. */
280 out.inode = get_inc_field(&state, BKEY_FIELD_INODE);
281 out.offset = get_inc_field(&state, BKEY_FIELD_OFFSET);
282 out.snapshot = get_inc_field(&state, BKEY_FIELD_SNAPSHOT);
/* Pack @in's key fields into @out using @format; returns false if any
 * field value does not fit in the packed format. */
289 * bch2_bkey_pack_key -- pack just the key, not the value
291 bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
292 const struct bkey_format *format)
294 struct pack_state state = pack_state_init(format, out);
/* In-place packing is not supported. */
296 EBUG_ON((void *) in == (void *) out);
297 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
298 EBUG_ON(in->format != KEY_FORMAT_CURRENT);
/* Pack every key field via the bkey_fields() x-macro; bail on overflow. */
302 #define x(id, field) if (!set_inc_field(&state, id, in->field)) return false;
307 * Extents - we have to guarantee that if an extent is packed, a trimmed
308 * version will also pack:
310 if (bkey_start_offset(in) <
311 le64_to_cpu(format->field_offset[BKEY_FIELD_OFFSET]))
314 pack_state_finish(&state, out);
/* Packed header: same value size, format-specific key size. */
315 out->u64s = format->key_u64s + in->u64s - BKEY_U64s;
316 out->format = KEY_FORMAT_LOCAL_BTREE;
317 out->needs_whiteout = in->needs_whiteout;
318 out->type = in->type;
/* Debug builds verify the round trip reproduces @in exactly. */
320 bch2_bkey_pack_verify(out, in, format);
325 * bch2_bkey_unpack -- unpack the key and the value
327 void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
328 const struct bkey_packed *src)
/* Decode the key fields into dst->k using the btree node's format... */
330 __bkey_unpack_key(b, &dst->k, src);
/* ...then copy the value, which needs no transformation. */
333 bkeyp_val(&b->format, src),
334 bkeyp_val_u64s(&b->format, src));
338 * bch2_bkey_pack -- pack the key and the value
340 bool bch2_bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
341 const struct bkey_format *format)
/* Pack the key into a temporary first so @out may alias @in. */
343 struct bkey_packed tmp;
345 if (!bch2_bkey_pack_key(&tmp, &in->k, format))
/* memmove: value regions may overlap when out aliases in. */
348 memmove_u64s((u64 *) out + format->key_u64s,
350 bkey_val_u64s(&in->k));
/* Finally install the packed key ahead of the value. */
351 memcpy_u64s(out, &tmp, format->key_u64s);
/*
 * Like set_inc_field(), but instead of failing when @v is too large for the
 * field, clamps it to the field's maximum representable value. The bool
 * return presumably reports whether the value was stored exactly;
 * NOTE(review): the return statements are not visible in this view.
 */
357 static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
359 unsigned bits = state->format->bits_per_field[field];
360 u64 offset = le64_to_cpu(state->format->field_offset[field]);
/* Value too wide for the field: clamp to the all-ones maximum. */
366 if (fls64(v) > bits) {
367 v = ~(~0ULL << bits);
/* Field spans the word boundary: write the high part into this word. */
371 if (bits > state->bits) {
373 state->w |= (v >> 1) >> (bits - 1);
375 *state->p = state->w;
376 state->p = next_word(state->p);
/* Remaining (or whole) field fits in the current word. */
382 state->w |= v << state->bits;
387 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * Debug helper: compute into @out the packed key immediately after @k in
 * sort order, by incrementing @k's key bits as one big integer. Return
 * value presumably indicates whether a successor exists (i.e. @k is not
 * already all ones) - NOTE(review): not all of the body is visible here.
 */
388 static bool bkey_packed_successor(struct bkey_packed *out,
389 const struct btree *b,
390 struct bkey_packed k)
392 const struct bkey_format *f = &b->format;
393 unsigned nr_key_bits = b->nr_key_bits;
394 unsigned first_bit, offset;
397 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
/* Locate the least significant key bit: word index and bit offset. */
404 first_bit = high_bit_offset + nr_key_bits - 1;
405 p = nth_word(high_word(f, out), first_bit >> 6);
406 offset = 63 - (first_bit & 63);
/* Ripple-carry increment, word by word, from least significant upward. */
408 while (nr_key_bits) {
409 unsigned bits = min(64 - offset, nr_key_bits);
410 u64 mask = (~0ULL >> (64 - bits)) << offset;
/* This word isn't saturated - increment here and we're done. */
412 if ((*p & mask) != mask) {
413 *p += 1ULL << offset;
/* Sanity: the successor must compare strictly greater than @k. */
414 EBUG_ON(bch2_bkey_cmp_packed(b, out, &k) <= 0);
/*
 * Pack position @in, allowing the result to be smaller than @in when it
 * cannot be represented exactly. Returns BKEY_PACK_POS_FAIL when even a
 * smaller key can't be produced, otherwise EXACT or SMALLER.
 */
429 * Returns a packed key that compares <= in
431 * This is used in bset_search_tree(), where we need a packed pos in order to be
432 * able to compare against the keys in the auxiliary search tree - and it's
433 * legal to use a packed pos that isn't equivalent to the original pos,
434 * _provided_ it compares <= to the original pos.
436 enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
438 const struct btree *b)
440 const struct bkey_format *f = &b->format;
441 struct pack_state state = pack_state_init(f, out);
442 #ifdef CONFIG_BCACHEFS_DEBUG
/* Keep the original pos for the debug comparison at the end. */
443 struct bpos orig = in;
/*
 * A field below the format's offset can't be represented; round the
 * position DOWN by maxing out all less-significant fields (so the
 * result still compares <= the original).
 */
449 if (unlikely(in.snapshot <
450 le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
453 return BKEY_PACK_POS_FAIL;
454 in.snapshot = KEY_SNAPSHOT_MAX;
458 if (unlikely(in.offset <
459 le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]))) {
461 return BKEY_PACK_POS_FAIL;
462 in.offset = KEY_OFFSET_MAX;
463 in.snapshot = KEY_SNAPSHOT_MAX;
/* The inode is the most significant field: if it can't pack, give up. */
467 if (unlikely(in.inode <
468 le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
469 return BKEY_PACK_POS_FAIL;
/* When a field is clamped, saturate all less-significant fields too. */
471 if (!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode)) {
472 in.offset = KEY_OFFSET_MAX;
473 in.snapshot = KEY_SNAPSHOT_MAX;
477 if (!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset)) {
478 in.snapshot = KEY_SNAPSHOT_MAX;
482 if (!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot))
485 pack_state_finish(&state, out);
486 out->u64s = f->key_u64s;
487 out->format = KEY_FORMAT_LOCAL_BTREE;
488 out->type = KEY_TYPE_deleted;
490 #ifdef CONFIG_BCACHEFS_DEBUG
/* Exact pack must compare equal to the original position... */
492 BUG_ON(bkey_cmp_left_packed(b, out, &orig));
494 struct bkey_packed successor;
/* ...a lossy pack must be strictly smaller, with its successor >= orig. */
496 BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0);
497 BUG_ON(bkey_packed_successor(&successor, b, *out) &&
498 bkey_cmp_left_packed(b, &successor, &orig) < 0);
502 return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER;
/* Reset @s so per-field min/max can be accumulated from scratch. */
505 void bch2_bkey_format_init(struct bkey_format_state *s)
/* Start with an empty range: min at U64_MAX (max presumably at 0 -
 * NOTE(review): the field_max initializer line is not visible here). */
509 for (i = 0; i < ARRAY_SIZE(s->field_min); i++)
510 s->field_min[i] = U64_MAX;
512 for (i = 0; i < ARRAY_SIZE(s->field_max); i++)
515 /* Make sure we can store a size of 0: */
516 s->field_min[BKEY_FIELD_SIZE] = 0;
/* Fold value @v into the running min/max range tracked for @field. */
519 static void __bkey_format_add(struct bkey_format_state *s,
520 unsigned field, u64 v)
522 s->field_min[field] = min(s->field_min[field], v);
523 s->field_max[field] = max(s->field_max[field], v);
527 * Changes @format so that @k can be successfully packed with @format
/* Accumulate every field of @k via the bkey_fields() x-macro. */
529 void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
531 #define x(id, field) __bkey_format_add(s, id, k->field);
/* Extents: the start offset must also be packable (see pack_key). */
534 __bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
/* Accumulate just a position (inode/offset/snapshot) into the format state;
 * relies on those being the first three consecutive field ids. */
537 void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
541 __bkey_format_add(s, field++, p.inode);
542 __bkey_format_add(s, field++, p.offset);
543 __bkey_format_add(s, field++, p.snapshot);
/* Set field @i of @f to @bits wide with bias @offset, clamped so that
 * offset + max packed value never exceeds the unpacked field's range. */
547 * We don't want it to be possible for the packed format to represent fields
548 * bigger than a u64... that will cause confusion and issues (like with
549 * bkey_packed_successor())
551 static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
552 unsigned bits, u64 offset)
554 unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
/* Max value representable in the unpacked field, computed without UB
 * even when unpacked_bits == 64. */
555 u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
557 bits = min(bits, unpacked_bits);
/* Full-width fields take no offset; otherwise clamp so offset+max fits. */
559 offset = bits == unpacked_bits ? 0 : min(offset, unpacked_max - ((1ULL << bits) - 1));
561 f->bits_per_field[i] = bits;
562 f->field_offset[i] = cpu_to_le64(offset);
/*
 * Finalize the accumulated min/max ranges in @s into a concrete packed
 * format: per-field bit widths and offsets, plus total key_u64s.
 */
565 struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
567 unsigned i, bits = KEY_PACKED_BITS_START;
568 struct bkey_format ret = {
569 .nr_fields = BKEY_NR_FIELDS,
572 for (i = 0; i < ARRAY_SIZE(s->field_min); i++) {
/* Guard against a field that was never add()ed (min still > max). */
573 s->field_min[i] = min(s->field_min[i], s->field_max[i]);
/* Width = bits needed for the range; offset = the range's minimum. */
575 set_format_field(&ret, i,
576 fls64(s->field_max[i] - s->field_min[i]),
579 bits += ret.bits_per_field[i];
582 /* allow for extent merging: */
583 if (ret.bits_per_field[BKEY_FIELD_SIZE]) {
584 ret.bits_per_field[BKEY_FIELD_SIZE] += 4;
588 ret.key_u64s = DIV_ROUND_UP(bits, 64);
590 /* if we have enough spare bits, round fields up to nearest byte */
591 bits = ret.key_u64s * 64 - bits;
593 for (i = 0; i < ARRAY_SIZE(ret.bits_per_field); i++) {
594 unsigned r = round_up(ret.bits_per_field[i], 8) -
595 ret.bits_per_field[i];
598 set_format_field(&ret, i,
599 ret.bits_per_field[i] + r,
600 le64_to_cpu(ret.field_offset[i]));
/* The produced format must pass its own validator. */
605 EBUG_ON(bch2_bkey_format_validate(&ret));
/*
 * Validate a (possibly untrusted, on-disk) packed format. Returns NULL on
 * success or a static error string describing the first problem found.
 */
609 const char *bch2_bkey_format_validate(struct bkey_format *f)
611 unsigned i, bits = KEY_PACKED_BITS_START;
613 if (f->nr_fields != BKEY_NR_FIELDS)
614 return "incorrect number of fields";
616 for (i = 0; i < f->nr_fields; i++) {
617 unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
/* Max unpacked value, computed without UB when unpacked_bits == 64. */
618 u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
619 u64 field_offset = le64_to_cpu(f->field_offset[i]);
621 if (f->bits_per_field[i] > unpacked_bits)
622 return "field too large";
/* A full-width field with a nonzero offset would overflow on unpack. */
624 if ((f->bits_per_field[i] == unpacked_bits) && field_offset)
625 return "offset + bits overflow";
627 if (((field_offset + ((1ULL << f->bits_per_field[i]) - 1)) &
630 return "offset + bits overflow";
632 bits += f->bits_per_field[i];
/* key_u64s must match the total bit count exactly. */
635 if (f->key_u64s != DIV_ROUND_UP(bits, 64))
636 return "incorrect key_u64s";
/* Compare the packed key bits of @l_k and @r_k word by word from the most
 * significant end; return the index of the highest differing bit. */
642 * Most significant differing bit
643 * Bits are indexed from 0 - return is [0, nr_key_bits)
646 unsigned bch2_bkey_greatest_differing_bit(const struct btree *b,
647 const struct bkey_packed *l_k,
648 const struct bkey_packed *r_k)
650 const u64 *l = high_word(&b->format, l_k);
651 const u64 *r = high_word(&b->format, r_k);
652 unsigned nr_key_bits = b->nr_key_bits;
653 unsigned word_bits = 64 - high_bit_offset;
656 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
658 /* for big endian, skip past header */
659 l_v = *l & (~0ULL >> high_bit_offset);
660 r_v = *r & (~0ULL >> high_bit_offset);
662 while (nr_key_bits) {
/* Last (partial) word: discard the bits below the key. */
663 if (nr_key_bits < word_bits) {
664 l_v >>= word_bits - nr_key_bits;
665 r_v >>= word_bits - nr_key_bits;
668 nr_key_bits -= word_bits;
/* Highest set bit of the XOR, offset by the remaining lower words. */
672 return fls64(l_v ^ r_v) - 1 + nr_key_bits;
/* Find the lowest set bit in @k's packed key bits, scanning from the least
 * significant word upward. */
687 * Bits are indexed from 0 - return is [0, nr_key_bits)
690 unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k)
692 const u64 *p = high_word(&b->format, k);
693 unsigned nr_key_bits = b->nr_key_bits;
694 unsigned ret = 0, offset;
696 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
/* Advance to the lowest-order word holding key bits. */
698 offset = nr_key_bits;
699 while (offset > 64) {
/* Bit position of the key's least significant bit within that word. */
704 offset = 64 - offset;
706 while (nr_key_bits) {
707 unsigned bits = nr_key_bits + offset < 64
/* Mask off bits below the key in this word. */
711 u64 mask = (~0ULL >> (64 - bits)) << offset;
714 return ret + __ffs64(*p & mask) - offset;
/*
 * x86-64 variant: compare @nr_key_bits of @l and @r as big multi-word
 * integers using hand-written assembly. NOTE(review): most of the asm
 * body is not visible in this view.
 */
727 static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
728 unsigned nr_key_bits)
733 /* we shouldn't need asm for this, but gcc is being retarded: */
735 asm(".intel_syntax noprefix;"
/* Walk both operands downward one 64-bit word at a time. */
747 "lea rdi, [rdi - 8];"
748 "lea rsi, [rsi - 8];"
763 ".att_syntax prefix;"
764 : "=&D" (d0), "=&S" (d1), "=&d" (d2), "=&c" (d3), "=&a" (cmp)
765 : "0" (l), "1" (r), "3" (nr_key_bits)
766 : "r8", "r9", "cc", "memory");
/* Byte emitters for the JIT below: I(x) appends one machine-code byte to
 * the output buffer; In(...) append n bytes. */
771 #define I(_x) (*(out)++ = (_x))
773 #define I2(i0, i1) (I1(i0), I(i1))
774 #define I3(i0, i1, i2) (I2(i0, i1), I(i2))
775 #define I4(i0, i1, i2, i3) (I3(i0, i1, i2), I(i3))
776 #define I5(i0, i1, i2, i3, i4) (I4(i0, i1, i2, i3), I(i4))
/*
 * JIT one field's unpack step: emit x86-64 machine code into @out that
 * extracts @field from the packed key (rsi) and stores the unbiased value
 * at [rdi + dst_offset]. Returns the advanced output pointer.
 * NOTE(review): portions of the body are not visible in this view.
 */
778 static u8 *compile_bkey_field(const struct bkey_format *format, u8 *out,
779 enum bch_bkey_fields field,
780 unsigned dst_offset, unsigned dst_size,
783 unsigned bits = format->bits_per_field[field];
784 u64 offset = le64_to_cpu(format->field_offset[field]);
785 unsigned i, byte, bit_offset, align, shl, shr;
/* Zero-width field: the unpacked value is the constant @offset. */
787 if (!bits && !offset) {
798 /* just return offset: */
/* Constants > S32_MAX need two 32-bit immediate stores. */
802 if (offset > S32_MAX) {
803 /* mov [rdi + dst_offset], offset */
804 I3(0xc7, 0x47, dst_offset);
805 memcpy(out, &offset, 4);
808 I3(0xc7, 0x47, dst_offset + 4);
809 memcpy(out, (void *) &offset + 4, 4);
812 /* mov [rdi + dst_offset], offset */
/* REX.W prefix (0x48): 64-bit destination, sign-extended imm32. */
814 I4(0x48, 0xc7, 0x47, dst_offset);
815 memcpy(out, &offset, 4);
820 /* mov [rdi + dst_offset], offset */
821 I3(0xc7, 0x47, dst_offset);
822 memcpy(out, &offset, 4);
/* Locate the field: bit position from the high end of the packed key. */
832 bit_offset = format->key_u64s * 64;
833 for (i = 0; i <= field; i++)
834 bit_offset -= format->bits_per_field[i];
836 byte = bit_offset / 8;
837 bit_offset -= byte * 8;
/* Fast paths for byte/word-aligned fields, then general shift+mask. */
841 if (bit_offset == 0 && bits == 8) {
842 /* movzx eax, BYTE PTR [rsi + imm8] */
843 I4(0x0f, 0xb6, 0x46, byte);
844 } else if (bit_offset == 0 && bits == 16) {
845 /* movzx eax, WORD PTR [rsi + imm8] */
846 I4(0x0f, 0xb7, 0x46, byte);
847 } else if (bit_offset + bits <= 32) {
/* Re-align the load so the field fits in one 32-bit read. */
848 align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
850 bit_offset += align * 8;
852 BUG_ON(bit_offset + bits > 32);
854 /* mov eax, [rsi + imm8] */
855 I3(0x8b, 0x46, byte);
/* shr eax, bit_offset */
859 I3(0xc1, 0xe8, bit_offset);
/* Mask off bits above the field if it doesn't fill the register. */
862 if (bit_offset + bits < 32) {
863 unsigned mask = ~0U >> (32 - bits);
867 memcpy(out, &mask, 4);
870 } else if (bit_offset + bits <= 64) {
/* Same idea with a single 64-bit read. */
871 align = min(8 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 7);
873 bit_offset += align * 8;
875 BUG_ON(bit_offset + bits > 64);
877 /* mov rax, [rsi + imm8] */
878 I4(0x48, 0x8b, 0x46, byte);
/* shl to drop high bits, then shr to drop low bits + right-justify. */
880 shl = 64 - bit_offset - bits;
881 shr = bit_offset + shl;
885 I4(0x48, 0xc1, 0xe0, shl);
890 I4(0x48, 0xc1, 0xe8, shr);
/* Field straddles a 64-bit boundary: combine two loads. */
893 align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
895 bit_offset += align * 8;
897 BUG_ON(bit_offset + bits > 96);
899 /* mov rax, [rsi + byte] */
900 I4(0x48, 0x8b, 0x46, byte);
902 /* mov edx, [rsi + byte + 8] */
903 I3(0x8b, 0x56, byte + 8);
905 /* bits from next word: */
906 shr = bit_offset + bits - 64;
907 BUG_ON(shr > bit_offset);
909 /* shr rax, bit_offset */
910 I4(0x48, 0xc1, 0xe8, shr);
/* shl rdx to line the high part up above the low part. */
913 I4(0x48, 0xc1, 0xe2, 64 - shr);
/* or rax, rdx - merge the two halves. */
916 I3(0x48, 0x09, 0xd0);
918 shr = bit_offset - shr;
922 I4(0x48, 0xc1, 0xe8, shr);
/* Add the field's offset bias back; pick the cheapest encoding. */
927 if (offset > S32_MAX) {
930 memcpy(out, &offset, 8);
/* add rax, rdx */
933 I3(0x48, 0x01, 0xd0);
934 } else if (offset + (~0ULL >> (64 - bits)) > U32_MAX) {
937 memcpy(out, &offset, 4);
942 memcpy(out, &offset, 4);
/* Store the result at its destination in the unpacked bkey. */
948 /* mov [rdi + dst_offset], rax */
949 I4(0x48, 0x89, 0x47, dst_offset);
952 /* mov [rdi + dst_offset], eax */
953 I3(0x89, 0x47, dst_offset);
/*
 * JIT a complete unpack function for @format into @_out: emitted code takes
 * dst (rdi, unpacked bkey) and src (rsi, packed key) per the SysV ABI.
 * Returns the number of bytes of machine code generated.
 */
962 int bch2_compile_bkey_format(const struct bkey_format *format, void *_out)
964 bool eax_zeroed = false;
968 * rdi: dst - unpacked key
969 * rsi: src - packed key
972 /* k->u64s, k->format, k->type */
/* Fix up the header: convert packed u64s to unpacked, set format. */
977 /* add eax, BKEY_U64s - format->key_u64s */
978 I5(0x05, BKEY_U64s - format->key_u64s, KEY_FORMAT_CURRENT, 0, 0);
980 /* and eax, imm32: mask out k->pad: */
981 I5(0x25, 0xff, 0xff, 0xff, 0);
/* Emit one field-extraction sequence per bkey field (x-macro). */
986 #define x(id, field) \
987 out = compile_bkey_field(format, out, id, \
988 offsetof(struct bkey, field), \
989 sizeof(((struct bkey *) NULL)->field), \
997 return (void *) out - _out;
/*
 * Portable variant: compare @nr_key_bits of @l and @r as big multi-word
 * integers, most significant word first.
 */
1001 static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
1002 unsigned nr_key_bits)
1009 /* for big endian, skip past header */
1010 nr_key_bits += high_bit_offset;
1011 l_v = *l & (~0ULL >> high_bit_offset);
1012 r_v = *r & (~0ULL >> high_bit_offset);
/* Partial word: discard the bits below the key. */
1015 if (nr_key_bits < 64) {
1016 l_v >>= 64 - nr_key_bits;
1017 r_v >>= 64 - nr_key_bits;
/* First differing word (or exhaustion) decides the comparison. */
1023 if (!nr_key_bits || l_v != r_v)
1033 return cmp_int(l_v, r_v);
/* Compare two packed keys directly on their packed bits; both must be in
 * the btree node's local format (callers have already checked this). */
1038 int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l,
1039 const struct bkey_packed *r,
1040 const struct btree *b)
1042 const struct bkey_format *f = &b->format;
1045 EBUG_ON(!bkey_packed(l) || !bkey_packed(r));
1046 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
1048 ret = __bkey_cmp_bits(high_word(f, l),
/* Debug: bit-level compare must agree with unpack-then-compare. */
1052 EBUG_ON(ret != bpos_cmp(bkey_unpack_pos(b, l),
1053 bkey_unpack_pos(b, r)));
/* Compare a packed key (in the node's format) against an unpacked position
 * by unpacking @l's position first. */
1058 int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
1059 const struct bkey_packed *l,
1060 const struct bpos *r)
1062 return bpos_cmp(bkey_unpack_pos_format_checked(b, l), *r);
/*
 * General comparison of two keys that may each be packed or unpacked.
 * Fast path: both packed -> compare packed bits directly. Otherwise unpack
 * the packed side(s) and compare positions.
 */
1066 int bch2_bkey_cmp_packed(const struct btree *b,
1067 const struct bkey_packed *l,
1068 const struct bkey_packed *r)
1070 struct bkey unpacked;
1072 if (likely(bkey_packed(l) && bkey_packed(r)))
1073 return __bch2_bkey_cmp_packed_format_checked(l, r, b);
/* At most one side is packed; unpack it into the stack temporary. */
1075 if (bkey_packed(l)) {
1076 __bkey_unpack_key_format_checked(b, &unpacked, l);
1077 l = (void*) &unpacked;
1078 } else if (bkey_packed(r)) {
1079 __bkey_unpack_key_format_checked(b, &unpacked, r);
1080 r = (void*) &unpacked;
/* Both sides are now unpacked struct bkeys; compare positions. */
1083 return bpos_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
/* Compare a possibly-packed key @l against unpacked position @r. */
1087 int __bch2_bkey_cmp_left_packed(const struct btree *b,
1088 const struct bkey_packed *l,
1089 const struct bpos *r)
1091 const struct bkey *l_unpacked;
/* If @l is already unpacked, compare directly; else go via the format. */
1093 return unlikely(l_unpacked = packed_to_bkey_c(l))
1094 ? bpos_cmp(l_unpacked->p, *r)
1095 : __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
/* Byte-swap a struct bpos in place (endianness conversion).
 * NOTE(review): the swap loop is not visible in this view. */
1098 void bch2_bpos_swab(struct bpos *p)
/* h starts at the last byte of the bpos (one past the struct, minus one). */
1101 u8 *h = ((u8 *) &p[1]) - 1;
/* Byte-swap the key portion of @k in place; the format in effect depends
 * on whether @k is packed (node-local format) or unpacked (current). */
1110 void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
1112 const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
/* Swap between the first key byte and the last byte of key data. */
1113 u8 *l = k->key_start;
1114 u8 *h = (u8 *) (k->_data + f->key_u64s) - 1;
1123 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * Self-test: round-trip a known key through a hand-built packed format,
 * checking every field value and that the final pack succeeds.
 */
1124 void bch2_bkey_pack_test(void)
1126 struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0);
1127 struct bkey_packed p;
1129 struct bkey_format test_format = {
1131 .nr_fields = BKEY_NR_FIELDS,
/* Read fields out of the native-format key, write into the test format. */
1139 struct unpack_state in_s =
1140 unpack_state_init(&bch2_bkey_format_current, (void *) &t);
1141 struct pack_state out_s = pack_state_init(&test_format, &p);
1144 for (i = 0; i < out_s.format->nr_fields; i++) {
1145 u64 a, v = get_inc_field(&in_s, i);
/* Map the field id back to the struct bkey member via the x-macro. */
1148 #define x(id, field) case id: a = t.field; break;
1156 panic("got %llu actual %llu i %u\n", v, a, i);
1158 if (!set_inc_field(&out_s, i, v))
1159 panic("failed at %u\n", i);
/* Finally, the high-level pack path must also succeed. */
1162 BUG_ON(!bch2_bkey_pack_key(&p, &t, &test_format));