1 // SPDX-License-Identifier: GPL-2.0
6 #include "bkey_methods.h"
/* Canonical unpacked key format: one field per bkey member, no offsets. */
10 const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
/*
 * Dump the key bits of @k, as laid out by format @f, in binary to @out.
 * Walks the key words starting from the most-significant word.
 * NOTE(review): loop/brace lines are elided in this view; comments cover
 * only the visible statements.
 */
12 void bch2_bkey_packed_to_binary_text(struct printbuf *out,
13 const struct bkey_format *f,
14 const struct bkey_packed *k)
16 const u64 *p = high_word(f, k); /* most-significant key word */
17 unsigned word_bits = 64 - high_bit_offset;
18 unsigned nr_key_bits = bkey_format_key_bits(f) + high_bit_offset;
19 u64 v = *p & (~0ULL >> high_bit_offset); /* mask off the packed-key header bits */
22 prt_str(out, "(empty)"); /* format has no key bits at all */
27 unsigned next_key_bits = nr_key_bits;
29 if (nr_key_bits < 64) { /* last (partial) word: right-align the valid bits */
30 v >>= 64 - nr_key_bits;
36 bch2_prt_u64_binary(out, v, min(word_bits, nr_key_bits));
46 nr_key_bits = next_key_bits;
50 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * Debug check (CONFIG_BCACHEFS_DEBUG): verify that @packed round-trips back
 * to @unpacked under @format. On mismatch, prints both keys — decoded and as
 * raw binary — to aid diagnosing unpack bugs (compiled vs C unpack paths).
 */
52 static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
53 const struct bkey *unpacked,
54 const struct bkey_format *format)
58 BUG_ON(bkeyp_val_u64s(format, packed) !=
59 bkey_val_u64s(unpacked));
61 BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
63 tmp = __bch2_bkey_unpack_key(format, packed); /* re-unpack via the C path */
65 if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
66 struct printbuf buf = PRINTBUF;
68 prt_printf(&buf, "keys differ: format u64s %u fields %u %u %u %u %u\n",
70 format->bits_per_field[0],
71 format->bits_per_field[1],
72 format->bits_per_field[2],
73 format->bits_per_field[3],
74 format->bits_per_field[4]);
76 prt_printf(&buf, "compiled unpack: ");
77 bch2_bkey_to_text(&buf, unpacked);
80 prt_printf(&buf, "c unpack: ");
81 bch2_bkey_to_text(&buf, &tmp);
84 prt_printf(&buf, "compiled unpack: ");
85 bch2_bkey_packed_to_binary_text(&buf, &bch2_bkey_format_current,
86 (struct bkey_packed *) unpacked);
89 prt_printf(&buf, "c unpack: ");
90 bch2_bkey_packed_to_binary_text(&buf, &bch2_bkey_format_current,
91 (struct bkey_packed *) &tmp);
/* Non-debug build: verification compiles away to nothing. */
99 static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed,
100 const struct bkey *unpacked,
101 const struct bkey_format *format) {}
105 const struct bkey_format *format; /* format being packed into */
106 unsigned bits; /* bits remaining in current word */
107 u64 w; /* current word */
108 u64 *p; /* pointer to next word */
/*
 * Start a packing cursor at the most-significant key word of @k,
 * with the header bits already consumed from the first word.
 */
112 static struct pack_state pack_state_init(const struct bkey_format *format,
113 struct bkey_packed *k)
115 u64 *p = high_word(format, k);
117 return (struct pack_state) {
119 .bits = 64 - high_bit_offset,
/* Flush the in-progress word of @state back into @k's key area. */
126 static void pack_state_finish(struct pack_state *state,
127 struct bkey_packed *k)
129 EBUG_ON(state->p < k->_data); /* cursor must stay within the key words */
130 EBUG_ON(state->p >= k->_data + state->format->key_u64s);
132 *state->p = state->w;
/* Read-side cursor over a packed key, mirror of struct pack_state. */
135 struct unpack_state {
136 const struct bkey_format *format;
137 unsigned bits; /* bits remaining in current word */
138 u64 w; /* current word */
139 const u64 *p; /* pointer to next word */
/*
 * Start an unpacking cursor at @k's most-significant key word; the first
 * word is pre-shifted left to discard the packed-key header bits.
 */
143 static struct unpack_state unpack_state_init(const struct bkey_format *format,
144 const struct bkey_packed *k)
146 const u64 *p = high_word(format, k);
148 return (struct unpack_state) {
150 .bits = 64 - high_bit_offset,
151 .w = *p << high_bit_offset,
/*
 * Extract the next field from the unpack cursor and add back the format's
 * per-field offset, advancing to the next word when the field straddles
 * a word boundary.
 */
157 static u64 get_inc_field(struct unpack_state *state, unsigned field)
159 unsigned bits = state->format->bits_per_field[field];
160 u64 v = 0, offset = le64_to_cpu(state->format->field_offset[field]);
162 if (bits >= state->bits) { /* field spills into (or fills) the next word */
163 v = state->w >> (64 - bits);
166 state->p = next_word(state->p);
167 state->w = *state->p;
171 /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
172 v |= (state->w >> 1) >> (63 - bits);
/*
 * Write @v (already offset-adjusted and range-checked by the caller) into
 * the pack cursor, spilling into the next word when it doesn't fit.
 */
180 static void __set_inc_field(struct pack_state *state, unsigned field, u64 v)
182 unsigned bits = state->format->bits_per_field[field];
185 if (bits > state->bits) { /* value straddles the current word boundary */
187 /* avoid shift by 64 if bits is 64 - bits is never 0 here: */
188 state->w |= (v >> 1) >> (bits - 1);
190 *state->p = state->w; /* flush the filled word */
191 state->p = next_word(state->p);
197 state->w |= v << state->bits;
/*
 * Pack field @v after subtracting the format's offset; returns false when
 * the value is not representable in this format (elided checks here).
 */
202 static bool set_inc_field(struct pack_state *state, unsigned field, u64 v)
204 unsigned bits = state->format->bits_per_field[field];
205 u64 offset = le64_to_cpu(state->format->field_offset[field]);
215 __set_inc_field(state, field, v);
220 * Note: does NOT set out->format (we don't know what it should be here!)
222 * Also: doesn't work on extents - it doesn't preserve the invariant that
223 * if k is packed bkey_start_pos(k) will successfully pack
225 static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
226 struct bkey_packed *out,
227 const struct bkey_format *in_f,
228 const struct bkey_packed *in)
230 struct pack_state out_s = pack_state_init(out_f, out);
231 struct unpack_state in_s = unpack_state_init(in_f, in);
	/* re-pack each field from @in_f's encoding into @out_f's */
237 for (i = 0; i < BKEY_NR_FIELDS; i++)
238 if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
241 /* Can't happen because the val would be too big to unpack: */
242 EBUG_ON(in->u64s - in_f->key_u64s + out_f->key_u64s > U8_MAX);
244 pack_state_finish(&out_s, out);
245 out->u64s = out_f->key_u64s + in->u64s - in_f->key_u64s;
246 out->needs_whiteout = in->needs_whiteout;
247 out->type = in->type;
/*
 * Re-encode a whole packed key from format @in_f to @out_f: transform the
 * key bits, then copy the value verbatim after the new key area.
 * Returns false if the key can't be represented in @out_f.
 */
252 bool bch2_bkey_transform(const struct bkey_format *out_f,
253 struct bkey_packed *out,
254 const struct bkey_format *in_f,
255 const struct bkey_packed *in)
257 if (!bch2_bkey_transform_key(out_f, out, in_f, in))
260 memcpy_u64s((u64 *) out + out_f->key_u64s,
261 (u64 *) in + in_f->key_u64s,
262 (in->u64s - in_f->key_u64s));
/*
 * C (non-JIT) fallback: unpack every field of @in into a struct bkey.
 * The x() macro expands over the bkey field list to unpack each field
 * in declaration order.
 */
266 struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format,
267 const struct bkey_packed *in)
269 struct unpack_state state = unpack_state_init(format, in);
272 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
273 EBUG_ON(in->u64s < format->key_u64s);
274 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
275 EBUG_ON(in->u64s - format->key_u64s + BKEY_U64s > U8_MAX);
	/* unpacked key is larger: val stays the same size, header grows */
277 out.u64s = BKEY_U64s + in->u64s - format->key_u64s;
278 out.format = KEY_FORMAT_CURRENT;
279 out.needs_whiteout = in->needs_whiteout;
283 #define x(id, field) out.field = get_inc_field(&state, id);
290 #ifndef HAVE_BCACHEFS_COMPILED_UNPACK
/*
 * Unpack only the position (inode/offset/snapshot) of @in — cheaper than a
 * full key unpack. Only built when there is no compiled unpack.
 */
291 struct bpos __bkey_unpack_pos(const struct bkey_format *format,
292 const struct bkey_packed *in)
294 struct unpack_state state = unpack_state_init(format, in);
297 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
298 EBUG_ON(in->u64s < format->key_u64s);
299 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
301 out.inode = get_inc_field(&state, BKEY_FIELD_INODE);
302 out.offset = get_inc_field(&state, BKEY_FIELD_OFFSET);
303 out.snapshot = get_inc_field(&state, BKEY_FIELD_SNAPSHOT);
310 * bch2_bkey_pack_key -- pack just the key, not the value
 *
 * Returns false when @in is not representable in @format (a set_inc_field
 * failure); @out is then left in an unspecified state.
312 bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
313 const struct bkey_format *format)
315 struct pack_state state = pack_state_init(format, out);
318 EBUG_ON((void *) in == (void *) out); /* in-place packing unsupported */
319 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
320 EBUG_ON(in->format != KEY_FORMAT_CURRENT);
	/* pack each bkey field in order; bail on the first unrepresentable one */
324 #define x(id, field) if (!set_inc_field(&state, id, in->field)) return false;
327 pack_state_finish(&state, out);
328 out->u64s = format->key_u64s + in->u64s - BKEY_U64s;
329 out->format = KEY_FORMAT_LOCAL_BTREE;
330 out->needs_whiteout = in->needs_whiteout;
331 out->type = in->type;
	/* debug-only round-trip check */
333 bch2_bkey_pack_verify(out, in, format);
338 * bch2_bkey_unpack -- unpack the key and the value
 *
 * Unpacks @src (in @b's format) into @dst, then copies the value verbatim.
340 void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
341 const struct bkey_packed *src)
343 __bkey_unpack_key(b, &dst->k, src);
346 bkeyp_val(&b->format, src),
347 bkeyp_val_u64s(&b->format, src));
351 * bch2_bkey_pack -- pack the key and the value
 *
 * Packs the key into a temporary first so @out and @in may overlap; the
 * value is moved with memmove_u64s for the same reason. Returns false if
 * the key doesn't pack.
353 bool bch2_bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
354 const struct bkey_format *format)
356 struct bkey_packed tmp;
358 if (!bch2_bkey_pack_key(&tmp, &in->k, format))
361 memmove_u64s((u64 *) out + format->key_u64s,
363 bkey_val_u64s(&in->k));
364 memcpy_u64s_small(out, &tmp, format->key_u64s);
/*
 * Like set_inc_field(), but instead of failing on an unrepresentable value,
 * clamps it to the field's maximum (all ones in @bits bits). Used by
 * bch2_bkey_pack_pos_lossy(), where a smaller-comparing key is acceptable.
 */
370 static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
372 unsigned bits = state->format->bits_per_field[field];
373 u64 offset = le64_to_cpu(state->format->field_offset[field]);
379 if (fls64(v) > bits) { /* value too wide: saturate */
380 v = ~(~0ULL << bits);
384 __set_inc_field(state, field, v);
388 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * Debug helper: compute in @out the packed key immediately after @k, by
 * incrementing the key bits as one big integer (propagating carries from
 * the lowest key bit upward). Presumably returns false on overflow —
 * the return path is elided in this view; confirm against full source.
 */
389 static bool bkey_packed_successor(struct bkey_packed *out,
390 const struct btree *b,
391 struct bkey_packed k)
393 const struct bkey_format *f = &b->format;
394 unsigned nr_key_bits = b->nr_key_bits;
395 unsigned first_bit, offset;
398 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
	/* locate the least-significant key bit within the word array */
405 first_bit = high_bit_offset + nr_key_bits - 1;
406 p = nth_word(high_word(f, out), first_bit >> 6);
407 offset = 63 - (first_bit & 63);
409 while (nr_key_bits) {
410 unsigned bits = min(64 - offset, nr_key_bits);
411 u64 mask = (~0ULL >> (64 - bits)) << offset;
413 if ((*p & mask) != mask) { /* no carry out of this word */
414 *p += 1ULL << offset;
415 EBUG_ON(bch2_bkey_cmp_packed(b, out, &k) <= 0);
/*
 * Debug helper: true if any field of @f, at its packed maximum plus offset,
 * would overflow or exceed what the unpacked (current) format can hold.
 */
428 static bool bkey_format_has_too_big_fields(const struct bkey_format *f)
430 for (unsigned i = 0; i < f->nr_fields; i++) {
431 unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
432 u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
433 u64 packed_max = f->bits_per_field[i]
434 ? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
436 u64 field_offset = le64_to_cpu(f->field_offset[i]);
	/* first test catches u64 wraparound of packed_max + field_offset */
438 if (packed_max + field_offset < packed_max ||
439 packed_max + field_offset > unpacked_max)
448 * Returns a packed key that compares <= in
450 * This is used in bset_search_tree(), where we need a packed pos in order to be
451 * able to compare against the keys in the auxiliary search tree - and it's
452 * legal to use a packed pos that isn't equivalent to the original pos,
453 * _provided_ it compares <= to the original pos.
455 enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
457 const struct btree *b)
459 const struct bkey_format *f = &b->format;
460 struct pack_state state = pack_state_init(f, out);
462 #ifdef CONFIG_BCACHEFS_DEBUG
463 struct bpos orig = in; /* keep original pos for the debug comparisons below */
469 * bch2_bkey_pack_key() will write to all of f->key_u64s, minus the 3
470 * byte header, but pack_pos() won't if the len/version fields are big
471 * enough - we need to make sure to zero them out:
473 for (i = 0; i < f->key_u64s; i++)
	/*
	 * When a field underflows its format offset, round the position DOWN:
	 * decrement the next-more-significant field and saturate the less
	 * significant ones to their maxima, preserving "result <= in".
	 */
476 if (unlikely(in.snapshot <
477 le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
480 return BKEY_PACK_POS_FAIL;
481 in.snapshot = KEY_SNAPSHOT_MAX;
485 if (unlikely(in.offset <
486 le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]))) {
488 return BKEY_PACK_POS_FAIL;
489 in.offset = KEY_OFFSET_MAX;
490 in.snapshot = KEY_SNAPSHOT_MAX;
	/* inode is the most significant field: nothing above it to borrow from */
494 if (unlikely(in.inode <
495 le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
496 return BKEY_PACK_POS_FAIL;
	/* lossy packs: when a field saturates, max out everything below it */
498 if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode))) {
499 in.offset = KEY_OFFSET_MAX;
500 in.snapshot = KEY_SNAPSHOT_MAX;
504 if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset))) {
505 in.snapshot = KEY_SNAPSHOT_MAX;
509 if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot)))
512 pack_state_finish(&state, out);
513 out->u64s = f->key_u64s;
514 out->format = KEY_FORMAT_LOCAL_BTREE;
515 out->type = KEY_TYPE_deleted;
517 #ifdef CONFIG_BCACHEFS_DEBUG
	/* exact pack must compare equal; lossy pack must compare strictly less */
519 BUG_ON(bkey_cmp_left_packed(b, out, &orig));
521 struct bkey_packed successor;
523 BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0);
524 BUG_ON(bkey_packed_successor(&successor, b, *out) &&
525 bkey_cmp_left_packed(b, &successor, &orig) < 0 &&
526 !bkey_format_has_too_big_fields(f));
530 return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER;
/*
 * Reset a format-builder: field minima start at U64_MAX and maxima at 0 so
 * the first key added initializes the real ranges.
 */
533 void bch2_bkey_format_init(struct bkey_format_state *s)
537 for (i = 0; i < ARRAY_SIZE(s->field_min); i++)
538 s->field_min[i] = U64_MAX;
540 for (i = 0; i < ARRAY_SIZE(s->field_max); i++)
543 /* Make sure we can store a size of 0: */
544 s->field_min[BKEY_FIELD_SIZE] = 0;
/* Fold a position's three fields into the format-builder's min/max ranges. */
547 void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
551 __bkey_format_add(s, field++, p.inode);
552 __bkey_format_add(s, field++, p.offset);
553 __bkey_format_add(s, field++, p.snapshot);
557 * We don't want it to be possible for the packed format to represent fields
558 * bigger than a u64... that will cause confusion and issues (like with
559 * bkey_packed_successor())
561 static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
562 unsigned bits, u64 offset)
564 unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
565 u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
	/* clamp width to the unpacked field width */
567 bits = min(bits, unpacked_bits);
	/* full-width fields need no offset; otherwise cap offset so max fits */
569 offset = bits == unpacked_bits ? 0 : min(offset, unpacked_max - ((1ULL << bits) - 1));
571 f->bits_per_field[i] = bits;
572 f->field_offset[i] = cpu_to_le64(offset);
/*
 * Finalize a format from the accumulated min/max ranges: each field gets
 * just enough bits for (max - min) with min as its offset, then the format
 * is padded (extent-merge slack, byte rounding) and debug-validated.
 */
575 struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
577 unsigned i, bits = KEY_PACKED_BITS_START;
578 struct bkey_format ret = {
579 .nr_fields = BKEY_NR_FIELDS,
582 for (i = 0; i < ARRAY_SIZE(s->field_min); i++) {
	/* guard against a field that was never added (min still > max) */
583 s->field_min[i] = min(s->field_min[i], s->field_max[i]);
585 set_format_field(&ret, i,
586 fls64(s->field_max[i] - s->field_min[i]),
589 bits += ret.bits_per_field[i];
592 /* allow for extent merging: */
593 if (ret.bits_per_field[BKEY_FIELD_SIZE]) {
594 unsigned b = min(4U, 32U - ret.bits_per_field[BKEY_FIELD_SIZE]);
596 ret.bits_per_field[BKEY_FIELD_SIZE] += b;
600 ret.key_u64s = DIV_ROUND_UP(bits, 64);
602 /* if we have enough spare bits, round fields up to nearest byte */
603 bits = ret.key_u64s * 64 - bits;
605 for (i = 0; i < ARRAY_SIZE(ret.bits_per_field); i++) {
606 unsigned r = round_up(ret.bits_per_field[i], 8) -
607 ret.bits_per_field[i];
610 set_format_field(&ret, i,
611 ret.bits_per_field[i] + r,
612 le64_to_cpu(ret.field_offset[i]));
617 #ifdef CONFIG_BCACHEFS_DEBUG
619 struct printbuf buf = PRINTBUF;
	/* a format we just built must always validate */
621 BUG_ON(bch2_bkey_format_invalid(NULL, &ret, 0, &buf));
/*
 * Validate an on-disk key format: field count, per-field representability
 * against the unpacked format, and key_u64s consistency. Returns 0 when
 * valid, -BCH_ERR_invalid (with a message in @err) otherwise.
 */
628 int bch2_bkey_format_invalid(struct bch_fs *c,
629 struct bkey_format *f,
630 enum bkey_invalid_flags flags,
631 struct printbuf *err)
633 unsigned i, bits = KEY_PACKED_BITS_START;
635 if (f->nr_fields != BKEY_NR_FIELDS) {
636 prt_printf(err, "incorrect number of fields: got %u, should be %u",
637 f->nr_fields, BKEY_NR_FIELDS);
638 return -BCH_ERR_invalid;
642 * Verify that the packed format can't represent fields larger than the
645 for (i = 0; i < f->nr_fields; i++) {
	/* strict check only on new-enough superblocks (or no fs context) */
646 if (!c || c->sb.version_min >= bcachefs_metadata_version_snapshot) {
647 unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
648 u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
649 u64 packed_max = f->bits_per_field[i]
650 ? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
652 u64 field_offset = le64_to_cpu(f->field_offset[i]);
	/* first clause catches u64 overflow of packed_max + field_offset */
654 if (packed_max + field_offset < packed_max ||
655 packed_max + field_offset > unpacked_max) {
656 prt_printf(err, "field %u too large: %llu + %llu > %llu",
657 i, packed_max, field_offset, unpacked_max);
658 return -BCH_ERR_invalid;
662 bits += f->bits_per_field[i];
665 if (f->key_u64s != DIV_ROUND_UP(bits, 64)) {
666 prt_printf(err, "incorrect key_u64s: got %u, should be %u",
667 f->key_u64s, DIV_ROUND_UP(bits, 64));
668 return -BCH_ERR_invalid;
/* Pretty-print a key format as "u64s N fields bits:offset ...". */
674 void bch2_bkey_format_to_text(struct printbuf *out, const struct bkey_format *f)
676 prt_printf(out, "u64s %u fields ", f->key_u64s);
678 for (unsigned i = 0; i < ARRAY_SIZE(f->bits_per_field); i++) {
681 prt_printf(out, "%u:%llu",
682 f->bits_per_field[i],
683 le64_to_cpu(f->field_offset[i]));
688 * Most significant differing bit
689 * Bits are indexed from 0 - return is [0, nr_key_bits)
692 unsigned bch2_bkey_greatest_differing_bit(const struct btree *b,
693 const struct bkey_packed *l_k,
694 const struct bkey_packed *r_k)
696 const u64 *l = high_word(&b->format, l_k);
697 const u64 *r = high_word(&b->format, r_k);
698 unsigned nr_key_bits = b->nr_key_bits;
699 unsigned word_bits = 64 - high_bit_offset;
702 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
704 /* for big endian, skip past header */
705 l_v = *l & (~0ULL >> high_bit_offset);
706 r_v = *r & (~0ULL >> high_bit_offset);
	/* scan word by word from most significant until the words differ */
708 while (nr_key_bits) {
709 if (nr_key_bits < word_bits) { /* final partial word: drop unused low bits */
710 l_v >>= word_bits - nr_key_bits;
711 r_v >>= word_bits - nr_key_bits;
714 nr_key_bits -= word_bits;
	/* index of the highest set bit of the XOR, offset by remaining bits */
718 return fls64(l_v ^ r_v) - 1 + nr_key_bits;
733 * Bits are indexed from 0 - return is [0, nr_key_bits)
 *
 * Find-first-set over the key bits of @k, scanning from the least
 * significant key bit upward.
736 unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k)
738 const u64 *p = high_word(&b->format, k);
739 unsigned nr_key_bits = b->nr_key_bits;
740 unsigned ret = 0, offset;
742 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
	/* step down to the word containing the least-significant key bit */
744 offset = nr_key_bits;
745 while (offset > 64) {
750 offset = 64 - offset;
752 while (nr_key_bits) {
753 unsigned bits = nr_key_bits + offset < 64
757 u64 mask = (~0ULL >> (64 - bits)) << offset;
	/* found a set bit within this word's valid range */
760 return ret + __ffs64(*p & mask) - offset;
771 #ifdef HAVE_BCACHEFS_COMPILED_UNPACK
/* Byte emitters for the x86-64 JIT: I() appends one byte to the output
 * buffer; I2..I5 append fixed multi-byte sequences. */
773 #define I(_x) (*(out)++ = (_x))
775 #define I2(i0, i1) (I1(i0), I(i1))
776 #define I3(i0, i1, i2) (I2(i0, i1), I(i2))
777 #define I4(i0, i1, i2, i3) (I3(i0, i1, i2), I(i3))
778 #define I5(i0, i1, i2, i3, i4) (I4(i0, i1, i2, i3), I(i4))
/*
 * JIT one field's unpack into x86-64 machine code at @out; returns the
 * advanced output pointer. Calling convention (see bch2_compile_bkey_format):
 * rdi = dst unpacked bkey, rsi = src packed key, result built in rax
 * (spilling into rdx when a field straddles words). The instruction bytes
 * are hand-assembled; opcode comments below come from the source itself.
 */
780 static u8 *compile_bkey_field(const struct bkey_format *format, u8 *out,
781 enum bch_bkey_fields field,
782 unsigned dst_offset, unsigned dst_size,
785 unsigned bits = format->bits_per_field[field];
786 u64 offset = le64_to_cpu(format->field_offset[field]);
787 unsigned i, byte, bit_offset, align, shl, shr;
	/* field not stored at all: the unpacked value is a constant */
789 if (!bits && !offset) {
800 /* just return offset: */
	/* constant-field case: store the 64-bit offset immediate directly */
804 if (offset > S32_MAX) {
805 /* mov [rdi + dst_offset], offset */
806 I3(0xc7, 0x47, dst_offset);
807 memcpy(out, &offset, 4);
810 I3(0xc7, 0x47, dst_offset + 4);
811 memcpy(out, (void *) &offset + 4, 4);
814 /* mov [rdi + dst_offset], offset */
816 I4(0x48, 0xc7, 0x47, dst_offset);
817 memcpy(out, &offset, 4);
822 /* mov [rdi + dst_offset], offset */
823 I3(0xc7, 0x47, dst_offset);
824 memcpy(out, &offset, 4);
	/* locate the field's bit position, counting down from the top word */
834 bit_offset = format->key_u64s * 64;
835 for (i = 0; i <= field; i++)
836 bit_offset -= format->bits_per_field[i];
838 byte = bit_offset / 8;
839 bit_offset -= byte * 8;
	/* choose the cheapest load depending on alignment and width */
843 if (bit_offset == 0 && bits == 8) {
844 /* movzx eax, BYTE PTR [rsi + imm8] */
845 I4(0x0f, 0xb6, 0x46, byte);
846 } else if (bit_offset == 0 && bits == 16) {
847 /* movzx eax, WORD PTR [rsi + imm8] */
848 I4(0x0f, 0xb7, 0x46, byte);
849 } else if (bit_offset + bits <= 32) {
	/* re-align the load so the field fits in one 32-bit read */
850 align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
852 bit_offset += align * 8;
854 BUG_ON(bit_offset + bits > 32);
856 /* mov eax, [rsi + imm8] */
857 I3(0x8b, 0x46, byte);
861 I3(0xc1, 0xe8, bit_offset);
	/* mask off bits above the field if any remain */
864 if (bit_offset + bits < 32) {
865 unsigned mask = ~0U >> (32 - bits);
869 memcpy(out, &mask, 4);
872 } else if (bit_offset + bits <= 64) {
	/* single 64-bit load, then shl/shr to isolate the field */
873 align = min(8 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 7);
875 bit_offset += align * 8;
877 BUG_ON(bit_offset + bits > 64);
879 /* mov rax, [rsi + imm8] */
880 I4(0x48, 0x8b, 0x46, byte);
882 shl = 64 - bit_offset - bits;
883 shr = bit_offset + shl;
887 I4(0x48, 0xc1, 0xe0, shl);
892 I4(0x48, 0xc1, 0xe8, shr);
	/* field straddles a 64-bit boundary: combine two loads via rdx */
895 align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
897 bit_offset += align * 8;
899 BUG_ON(bit_offset + bits > 96);
901 /* mov rax, [rsi + byte] */
902 I4(0x48, 0x8b, 0x46, byte);
904 /* mov edx, [rsi + byte + 8] */
905 I3(0x8b, 0x56, byte + 8);
907 /* bits from next word: */
908 shr = bit_offset + bits - 64;
909 BUG_ON(shr > bit_offset);
911 /* shr rax, bit_offset */
912 I4(0x48, 0xc1, 0xe8, shr);
915 I4(0x48, 0xc1, 0xe2, 64 - shr);
918 I3(0x48, 0x09, 0xd0);
920 shr = bit_offset - shr;
924 I4(0x48, 0xc1, 0xe8, shr);
	/* add the format's field offset back onto the extracted value */
929 if (offset > S32_MAX) {
932 memcpy(out, &offset, 8);
935 I3(0x48, 0x01, 0xd0);
936 } else if (offset + (~0ULL >> (64 - bits)) > U32_MAX) {
939 memcpy(out, &offset, 4);
944 memcpy(out, &offset, 4);
	/* store the result into the destination bkey field */
950 /* mov [rdi + dst_offset], rax */
951 I4(0x48, 0x89, 0x47, dst_offset);
954 /* mov [rdi + dst_offset], eax */
955 I3(0x89, 0x47, dst_offset);
/*
 * Build a specialized x86-64 unpack function for @format into @_out:
 * emits a prologue that fixes up u64s/format/type, then one
 * compile_bkey_field() sequence per bkey field. Returns the generated
 * code size in bytes.
 */
964 int bch2_compile_bkey_format(const struct bkey_format *format, void *_out)
966 bool eax_zeroed = false;
970 * rdi: dst - unpacked key
971 * rsi: src - packed key
974 /* k->u64s, k->format, k->type */
979 /* add eax, BKEY_U64s - format->key_u64s */
980 I5(0x05, BKEY_U64s - format->key_u64s, KEY_FORMAT_CURRENT, 0, 0);
982 /* and eax, imm32: mask out k->pad: */
983 I5(0x25, 0xff, 0xff, 0xff, 0);
	/* emit an unpack sequence for every bkey field, in order */
988 #define x(id, field) \
989 out = compile_bkey_field(format, out, id, \
990 offsetof(struct bkey, field), \
991 sizeof(((struct bkey *) NULL)->field), \
999 return (void *) out - _out;
/* Out-of-line wrapper around the inlined format-checked packed compare. */
1006 int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l,
1007 const struct bkey_packed *r,
1008 const struct btree *b)
1010 return __bch2_bkey_cmp_packed_format_checked_inlined(l, r, b);
/* Compare a packed key (in @b's format) against an unpacked position. */
1014 int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
1015 const struct bkey_packed *l,
1016 const struct bpos *r)
1018 return bpos_cmp(bkey_unpack_pos_format_checked(b, l), *r);
/* Out-of-line wrapper around the inlined packed-vs-packed compare. */
1022 int bch2_bkey_cmp_packed(const struct btree *b,
1023 const struct bkey_packed *l,
1024 const struct bkey_packed *r)
1026 return bch2_bkey_cmp_packed_inlined(b, l, r);
/*
 * Compare @l (possibly packed) against position @r: if @l is actually an
 * unpacked bkey, compare directly; otherwise go through the format-checked
 * packed path.
 */
1030 int __bch2_bkey_cmp_left_packed(const struct btree *b,
1031 const struct bkey_packed *l,
1032 const struct bpos *r)
1034 const struct bkey *l_unpacked;
1036 return unlikely(l_unpacked = packed_to_bkey_c(l))
1037 ? bpos_cmp(l_unpacked->p, *r)
1038 : __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
/*
 * Byte-swap a struct bpos in place (endianness conversion); @h points at
 * the last byte of the struct. Swap-loop body is elided in this view.
 */
1041 void bch2_bpos_swab(struct bpos *p)
1044 u8 *h = ((u8 *) &p[1]) - 1;
/*
 * Byte-swap the key portion of @k in place, using @_f's key size when @k is
 * packed and the current (unpacked) format otherwise.
 */
1053 void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
1055 const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
1056 u8 *l = k->key_start; /* first byte after the header */
1057 u8 *h = (u8 *) (k->_data + f->key_u64s) - 1; /* last key byte */
1066 #ifdef CONFIG_BCACHEFS_DEBUG
1067 void bch2_bkey_pack_test(void)
1069 struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0);
1070 struct bkey_packed p;
1072 struct bkey_format test_format = {
1074 .nr_fields = BKEY_NR_FIELDS,
1082 struct unpack_state in_s =
1083 unpack_state_init(&bch2_bkey_format_current, (void *) &t);
1084 struct pack_state out_s = pack_state_init(&test_format, &p);
1087 for (i = 0; i < out_s.format->nr_fields; i++) {
1088 u64 a, v = get_inc_field(&in_s, i);
1091 #define x(id, field) case id: a = t.field; break;
1099 panic("got %llu actual %llu i %u\n", v, a, i);
1101 if (!set_inc_field(&out_s, i, v))
1102 panic("failed at %u\n", i);
1105 BUG_ON(!bch2_bkey_pack_key(&p, &t, &test_format));