/* The native in-memory key format; keys in this format are not bit-packed. */
7 const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
/* Forward declaration of the full unpack routine defined later in this file. */
9 struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
10 const struct bkey_packed *);
/*
 * bch2_to_binary - render the first @nr_bits bits of the words at @p into
 * @out as ASCII '0'/'1' characters, MSB first, starting at high_bit_offset.
 *
 * NOTE(review): several body lines (loop header, separators, termination)
 * are elided in this excerpt; only the visible statements are documented.
 */
12 void bch2_to_binary(char *out, const u64 *p, unsigned nr_bits)
14 unsigned bit = high_bit_offset, done = 0;
/* presumably emits a separator every 8 bits -- elided lines would confirm */
18 if (done && !(done % 8))
/* emit the bit at position @bit of the current word (bit 63 is the MSB) */
20 *out++ = *p & (1ULL << (63 - bit)) ? '1' : '0';
23 if (done == nr_bits) {
34 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * Debug-only sanity check: unpack @packed with @format and verify the result
 * is bit-for-bit identical to @unpacked; panic with a full dump otherwise.
 */
36 static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
37 const struct bkey *unpacked,
38 const struct bkey_format *format)
/* packed and unpacked views must agree on the value size */
42 BUG_ON(bkeyp_val_u64s(format, packed) !=
43 bkey_val_u64s(unpacked));
/* total u64s can never be smaller than the packed key portion alone */
45 BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
47 tmp = __bch2_bkey_unpack_key(format, packed);
49 if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
50 char buf1[160], buf2[160];
51 char buf3[160], buf4[160];
/* textual rendering of both keys, plus raw binary dumps for diffing */
53 bch2_bkey_to_text(buf1, sizeof(buf1), unpacked);
54 bch2_bkey_to_text(buf2, sizeof(buf2), &tmp);
55 bch2_to_binary(buf3, (void *) unpacked, 80);
56 bch2_to_binary(buf4, high_word(format, packed), 80);
58 panic("keys differ: format u64s %u fields %u %u %u %u %u\n%s\n%s\n%s\n%s\n",
60 format->bits_per_field[0],
61 format->bits_per_field[1],
62 format->bits_per_field[2],
63 format->bits_per_field[3],
64 format->bits_per_field[4],
65 buf1, buf2, buf3, buf4);
/* non-debug builds: verification compiles away to nothing */
70 static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed,
71 const struct bkey *unpacked,
72 const struct bkey_format *format) {}
/*
 * bch2_bkey_to_text - format @k human-readably into @buf (at most @size
 * bytes).  Only valid for unpacked keys (BUG_ON below enforces this).
 */
75 int bch2_bkey_to_text(char *buf, size_t size, const struct bkey *k)
77 char *out = buf, *end = buf + size;
/* bounded-append helper; scnprintf never overruns [out, end) */
79 #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
81 p("u64s %u type %u %llu:%llu snap %u len %u ver %llu",
82 k->u64s, k->type, k->p.inode, k->p.offset,
83 k->p.snapshot, k->size, k->version.lo);
85 BUG_ON(bkey_packed(k));
/* per-type suffixes; switch body largely elided in this excerpt */
88 case KEY_TYPE_DELETED:
91 case KEY_TYPE_DISCARD:
/* struct pack_state fields (the struct's opening line precedes this excerpt):
 * a cursor for writing bit-packed fields, most-significant word first. */
107 const struct bkey_format *format;
108 unsigned bits; /* bits remaining in current word */
109 u64 w; /* current word */
110 u64 *p; /* pointer to next word */
/* Start a pack cursor at the high (most significant) word of @k. */
114 static struct pack_state pack_state_init(const struct bkey_format *format,
115 struct bkey_packed *k)
117 u64 *p = high_word(format, k);
119 return (struct pack_state) {
/* the first word's top high_bit_offset bits are reserved for the header */
121 .bits = 64 - high_bit_offset,
/* Flush the partially-filled word in @state back into @k's storage. */
128 static void pack_state_finish(struct pack_state *state,
129 struct bkey_packed *k)
/* the cursor must still point inside k's key area */
131 EBUG_ON(state->p < k->_data);
132 EBUG_ON(state->p >= k->_data + state->format->key_u64s);
134 *state->p = state->w;
/* Read-side counterpart of pack_state: a cursor for extracting bit fields. */
137 struct unpack_state {
138 const struct bkey_format *format;
139 unsigned bits; /* bits remaining in current word */
140 u64 w; /* current word */
141 const u64 *p; /* pointer to next word */
/* Start an unpack cursor at the high word of @k, skipping the header bits. */
145 static struct unpack_state unpack_state_init(const struct bkey_format *format,
146 const struct bkey_packed *k)
148 const u64 *p = high_word(format, k);
150 return (struct unpack_state) {
152 .bits = 64 - high_bit_offset,
/* pre-shift the header out of the first word so field bits are left-aligned */
153 .w = *p << high_bit_offset,
/*
 * get_inc_field - extract the next field from @state and add back its
 * per-field offset (fields are stored offset-subtracted to save bits).
 * NOTE(review): word-advance bookkeeping lines are elided in this excerpt.
 */
159 static u64 get_inc_field(struct unpack_state *state, unsigned field)
161 unsigned bits = state->format->bits_per_field[field];
162 u64 v = 0, offset = le64_to_cpu(state->format->field_offset[field]);
/* field straddles (or exactly fills) the current word: take what's left... */
164 if (bits >= state->bits) {
165 v = state->w >> (64 - bits);
/* ...then advance to the next word for the remainder */
168 state->p = next_word(state->p);
169 state->w = *state->p;
173 /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
174 v |= (state->w >> 1) >> (63 - bits);
/*
 * set_inc_field - pack @v (offset-subtracted) into the next field of @state.
 * Returns false if @v does not fit in the field (checked in elided lines).
 */
182 static bool set_inc_field(struct pack_state *state, unsigned field, u64 v)
184 unsigned bits = state->format->bits_per_field[field];
185 u64 offset = le64_to_cpu(state->format->field_offset[field]);
/* field straddles the current word boundary */
195 if (bits > state->bits) {
197 /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
198 state->w |= (v >> 1) >> (bits - 1);
/* flush the filled word and move the cursor on */
200 *state->p = state->w;
201 state->p = next_word(state->p);
/* field fits entirely in the current word */
207 state->w |= v << state->bits;
213 * Note: does NOT set out->format (we don't know what it should be here!)
215 * Also: doesn't work on extents - it doesn't preserve the invariant that
216 * if k is packed bkey_start_pos(k) will successfully pack
/* Repack just the key portion of @in (format @in_f) into @out (format
 * @out_f) by streaming each field through an unpack/pack cursor pair. */
218 static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
219 struct bkey_packed *out,
220 const struct bkey_format *in_f,
221 const struct bkey_packed *in)
223 struct pack_state out_s = pack_state_init(out_f, out);
224 struct unpack_state in_s = unpack_state_init(in_f, in);
/* field-by-field copy; bail (elided line) if a field doesn't fit in out_f */
229 for (i = 0; i < BKEY_NR_FIELDS; i++)
230 if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
233 /* Can't happen because the val would be too big to unpack: */
234 EBUG_ON(in->u64s - in_f->key_u64s + out_f->key_u64s > U8_MAX);
236 pack_state_finish(&out_s, out);
/* total size = new key size + unchanged value size */
237 out->u64s = out_f->key_u64s + in->u64s - in_f->key_u64s;
238 out->needs_whiteout = in->needs_whiteout;
239 out->type = in->type;
/*
 * bch2_bkey_transform - repack the key portion of @in into @out_f's format,
 * then copy the value verbatim after the new key.  Returns false (elided
 * line) if the key cannot be represented in @out_f.
 */
244 bool bch2_bkey_transform(const struct bkey_format *out_f,
245 struct bkey_packed *out,
246 const struct bkey_format *in_f,
247 const struct bkey_packed *in)
249 if (!bch2_bkey_transform_key(out_f, out, in_f, in))
/* value bytes live immediately after the (format-sized) key in both */
252 memcpy_u64s((u64 *) out + out_f->key_u64s,
253 (u64 *) in + in_f->key_u64s,
254 (in->u64s - in_f->key_u64s));
/* X-macro: the packed fields in on-disk order, mapping each field id to the
 * struct bkey member it unpacks into.  Used to generate per-field code. */
258 #define bkey_fields() \
259 x(BKEY_FIELD_INODE, p.inode) \
260 x(BKEY_FIELD_OFFSET, p.offset) \
261 x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
262 x(BKEY_FIELD_SIZE, size) \
263 x(BKEY_FIELD_VERSION_HI, version.hi) \
264 x(BKEY_FIELD_VERSION_LO, version.lo)
/*
 * __bch2_bkey_unpack_key - fully unpack @in (packed with @format) into a
 * native struct bkey, expanding every field via the bkey_fields() x-macro.
 */
266 struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format,
267 const struct bkey_packed *in)
269 struct unpack_state state = unpack_state_init(format, in);
272 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
273 EBUG_ON(in->u64s < format->key_u64s);
274 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
/* unpacking grows the key portion to BKEY_U64s; must still fit in a u8 */
275 EBUG_ON(in->u64s - format->key_u64s + BKEY_U64s > U8_MAX);
277 out.u64s = BKEY_U64s + in->u64s - format->key_u64s;
278 out.format = KEY_FORMAT_CURRENT;
279 out.needs_whiteout = in->needs_whiteout;
/* expand one get_inc_field() assignment per packed field */
283 #define x(id, field) out.field = get_inc_field(&state, id);
/* Portable fallback used when no compiled (JIT) unpack routine exists. */
290 #ifndef HAVE_BCACHE_COMPILED_UNPACK
/*
 * __bkey_unpack_pos - unpack only the position (inode:offset:snapshot)
 * fields of @in; cheaper than a full key unpack.
 */
291 struct bpos __bkey_unpack_pos(const struct bkey_format *format,
292 const struct bkey_packed *in)
294 struct unpack_state state = unpack_state_init(format, in);
297 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
298 EBUG_ON(in->u64s < format->key_u64s);
299 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
/* fields must be read in on-disk order; pos fields come first */
301 out.inode = get_inc_field(&state, BKEY_FIELD_INODE);
302 out.offset = get_inc_field(&state, BKEY_FIELD_OFFSET);
303 out.snapshot = get_inc_field(&state, BKEY_FIELD_SNAPSHOT);
310 * bch2_bkey_pack_key -- pack just the key, not the value
/* Returns false (elided lines) when @in cannot be represented in @format. */
312 bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
313 const struct bkey_format *format)
315 struct pack_state state = pack_state_init(format, out);
/* in-place packing is not supported */
317 EBUG_ON((void *) in == (void *) out);
318 EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
319 EBUG_ON(in->format != KEY_FORMAT_CURRENT);
/* expand one set_inc_field() per field; any failure aborts the pack */
323 #define x(id, field) if (!set_inc_field(&state, id, in->field)) return false;
328 * Extents - we have to guarantee that if an extent is packed, a trimmed
329 * version will also pack:
331 if (bkey_start_offset(in) < format->field_offset[BKEY_FIELD_OFFSET])
334 pack_state_finish(&state, out);
/* packed size = format key size + original value size */
335 out->u64s = format->key_u64s + in->u64s - BKEY_U64s;
336 out->format = KEY_FORMAT_LOCAL_BTREE;
337 out->needs_whiteout = in->needs_whiteout;
338 out->type = in->type;
/* debug builds round-trip the result to prove pack/unpack agree */
340 bch2_bkey_pack_verify(out, in, format);
345 * bch2_bkey_unpack -- unpack the key and the value
347 void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
348 const struct bkey_packed *src)
/* unpack the key portion, then copy the value bytes verbatim (elided call) */
350 dst->k = bkey_unpack_key(b, src);
353 bkeyp_val(&b->format, src),
354 bkeyp_val_u64s(&b->format, src));
358 * bch2_bkey_pack -- pack the key and the value
360 bool bch2_bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
361 const struct bkey_format *format)
/* pack into a temporary first so @out is untouched on failure */
363 struct bkey_packed tmp;
365 if (!bch2_bkey_pack_key(&tmp, &in->k, format))
/* memmove: @out may alias @in, and the value shifts to a new offset */
368 memmove_u64s((u64 *) out + format->key_u64s,
370 bkey_val_u64s(&in->k));
371 memcpy_u64s(out, &tmp, format->key_u64s);
/*
 * set_inc_field_lossy - like set_inc_field(), but instead of failing when
 * @v doesn't fit, clamp it to the field's maximum value.  Used for packing
 * search positions where a <= approximation is acceptable.
 */
377 static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
379 unsigned bits = state->format->bits_per_field[field];
380 u64 offset = le64_to_cpu(state->format->field_offset[field]);
/* value too wide: saturate to all-ones in @bits bits */
386 if (fls64(v) > bits) {
387 v = ~(~0ULL << bits);
/* remainder mirrors set_inc_field(): split across the word boundary */
391 if (bits > state->bits) {
393 state->w |= (v >> 1) >> (bits - 1);
395 *state->p = state->w;
396 state->p = next_word(state->p);
402 state->w |= v << state->bits;
407 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * bkey_packed_successor - compute in @out the next representable packed key
 * after @k by incrementing the key's bit pattern (with carry across words).
 * Debug-only; used to validate lossy position packing.
 * NOTE(review): carry-propagation and return lines are elided here.
 */
408 static bool bkey_packed_successor(struct bkey_packed *out,
409 const struct btree *b,
410 struct bkey_packed k)
412 const struct bkey_format *f = &b->format;
413 unsigned nr_key_bits = b->nr_key_bits;
414 unsigned first_bit, offset;
417 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
/* locate the least significant key bit within the word array */
424 first_bit = high_bit_offset + nr_key_bits - 1;
425 p = nth_word(high_word(f, out), first_bit >> 6);
426 offset = 63 - (first_bit & 63);
428 while (nr_key_bits) {
429 unsigned bits = min(64 - offset, nr_key_bits);
430 u64 mask = (~0ULL >> (64 - bits)) << offset;
/* this word has room to carry: increment and verify the result is bigger */
432 if ((*p & mask) != mask) {
433 *p += 1ULL << offset;
434 EBUG_ON(bkey_cmp_packed(b, out, &k) <= 0);
449 * Returns a packed key that compares <= in
451 * This is used in bset_search_tree(), where we need a packed pos in order to be
452 * able to compare against the keys in the auxiliary search tree - and it's
453 * legal to use a packed pos that isn't equivalent to the original pos,
454 * _provided_ it compares <= to the original pos.
456 enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
458 const struct btree *b)
460 const struct bkey_format *f = &b->format;
461 struct pack_state state = pack_state_init(f, out);
462 #ifdef CONFIG_BCACHEFS_DEBUG
/* keep the original pos so debug code can compare the lossy result to it */
463 struct bpos orig = in;
/* a field below its format offset can't be represented at all; when a less
 * significant field underflows we decrement the more significant one
 * (elided) and saturate the lower fields to their maxima */
469 if (unlikely(in.snapshot <
470 le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
473 return BKEY_PACK_POS_FAIL;
474 in.snapshot = KEY_SNAPSHOT_MAX;
478 if (unlikely(in.offset <
479 le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]))) {
481 return BKEY_PACK_POS_FAIL;
482 in.offset = KEY_OFFSET_MAX;
483 in.snapshot = KEY_SNAPSHOT_MAX;
/* inode is the most significant field; no smaller key exists below it */
487 if (unlikely(in.inode <
488 le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
489 return BKEY_PACK_POS_FAIL;
/* lossy pack each field; truncation forces lower fields to their maxima so
 * the overall result still compares <= in */
491 if (!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode)) {
492 in.offset = KEY_OFFSET_MAX;
493 in.snapshot = KEY_SNAPSHOT_MAX;
497 if (!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset)) {
498 in.snapshot = KEY_SNAPSHOT_MAX;
502 if (!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot))
505 pack_state_finish(&state, out);
506 out->u64s = f->key_u64s;
507 out->format = KEY_FORMAT_LOCAL_BTREE;
508 out->type = KEY_TYPE_DELETED;
510 #ifdef CONFIG_BCACHEFS_DEBUG
/* exact pack: must compare equal to the original */
512 BUG_ON(bkey_cmp_left_packed(b, out, &orig));
514 struct bkey_packed successor;
/* lossy pack: must be strictly smaller, and its successor must not also
 * be smaller (i.e. out is the greatest representable key < orig) */
516 BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0);
517 BUG_ON(bkey_packed_successor(&successor, b, *out) &&
518 bkey_cmp_left_packed(b, &successor, &orig) < 0);
522 return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER;
/*
 * bch2_bkey_format_init - reset @s so min/max tracking starts from the
 * identity (min = U64_MAX, max presumably 0 -- the max init line is elided).
 */
525 void bch2_bkey_format_init(struct bkey_format_state *s)
529 for (i = 0; i < ARRAY_SIZE(s->field_min); i++)
530 s->field_min[i] = U64_MAX;
532 for (i = 0; i < ARRAY_SIZE(s->field_max); i++)
535 /* Make sure we can store a size of 0: */
536 s->field_min[BKEY_FIELD_SIZE] = 0;
/* Widen the tracked [min, max] range of @field to include @v. */
539 static void __bkey_format_add(struct bkey_format_state *s,
540 unsigned field, u64 v)
542 s->field_min[field] = min(s->field_min[field], v);
543 s->field_max[field] = max(s->field_max[field], v);
547 * Changes @format so that @k can be successfully packed with @format
549 void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
/* fold every field of @k into the running min/max via the x-macro */
551 #define x(id, field) __bkey_format_add(s, id, k->field);
/* extents must also pack their trimmed start position */
554 __bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
/* Fold just a position (inode:offset:snapshot) into the format state. */
557 void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
561 __bkey_format_add(s, field++, p.inode);
562 __bkey_format_add(s, field++, p.offset);
563 __bkey_format_add(s, field++, p.snapshot);
567 * We don't want it to be possible for the packed format to represent fields
568 * bigger than a u64... that will cause confusion and issues (like with
569 * bkey_packed_successor())
/* Set field @i of @f to @bits wide, clamping @offset so offset + max field
 * value cannot overflow a u64 (offset unused entirely at 64 bits). */
571 static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
572 unsigned bits, u64 offset)
574 offset = bits == 64 ? 0 : min(offset, U64_MAX - ((1ULL << bits) - 1));
576 f->bits_per_field[i] = bits;
/* offsets are stored little-endian on disk */
577 f->field_offset[i] = cpu_to_le64(offset);
/*
 * bch2_bkey_format_done - turn the accumulated min/max ranges in @s into a
 * concrete packed format: each field gets just enough bits for its range
 * (biased by field_min), then spare bits are used to round fields up to
 * byte boundaries for cheaper unpacking.
 */
580 struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
582 unsigned i, bits = KEY_PACKED_BITS_START;
583 struct bkey_format ret = {
584 .nr_fields = BKEY_NR_FIELDS,
587 for (i = 0; i < ARRAY_SIZE(s->field_min); i++) {
/* guard against a field that was never added (min still > max) */
588 s->field_min[i] = min(s->field_min[i], s->field_max[i]);
/* width = bits needed for (max - min); offset = min */
590 set_format_field(&ret, i,
591 fls64(s->field_max[i] - s->field_min[i]),
594 bits += ret.bits_per_field[i];
597 /* allow for extent merging: */
598 if (ret.bits_per_field[BKEY_FIELD_SIZE]) {
599 ret.bits_per_field[BKEY_FIELD_SIZE] += 4;
603 ret.key_u64s = DIV_ROUND_UP(bits, 64);
605 /* if we have enough spare bits, round fields up to nearest byte */
606 bits = ret.key_u64s * 64 - bits;
608 for (i = 0; i < ARRAY_SIZE(ret.bits_per_field); i++) {
609 unsigned r = round_up(ret.bits_per_field[i], 8) -
610 ret.bits_per_field[i];
613 set_format_field(&ret, i,
614 ret.bits_per_field[i] + r,
615 le64_to_cpu(ret.field_offset[i]));
/* the produced format must always pass validation in debug builds */
620 EBUG_ON(bch2_bkey_format_validate(&ret));
/*
 * bch2_bkey_format_validate - sanity-check an (possibly on-disk) format.
 * Returns NULL on success or a static error string describing the problem.
 */
624 const char *bch2_bkey_format_validate(struct bkey_format *f)
626 unsigned i, bits = KEY_PACKED_BITS_START;
628 if (f->nr_fields != BKEY_NR_FIELDS)
629 return "invalid format: incorrect number of fields";
631 for (i = 0; i < f->nr_fields; i++) {
632 u64 field_offset = le64_to_cpu(f->field_offset[i]);
634 if (f->bits_per_field[i] > 64)
635 return "invalid format: field too large";
/* offset + max field value must fit in a u64 (64-bit fields need offset 0;
 * the surrounding condition is partly elided in this excerpt) */
638 (f->bits_per_field[i] == 64 ||
639 (field_offset + ((1ULL << f->bits_per_field[i]) - 1) <
641 return "invalid format: offset + bits overflow";
643 bits += f->bits_per_field[i];
/* declared key size must match the sum of field widths */
646 if (f->key_u64s != DIV_ROUND_UP(bits, 64))
647 return "invalid format: incorrect key_u64s";
653 * Most significant differing bit
654 * Bits are indexed from 0 - return is [0, nr_key_bits)
657 unsigned bch2_bkey_greatest_differing_bit(const struct btree *b,
658 const struct bkey_packed *l_k,
659 const struct bkey_packed *r_k)
661 const u64 *l = high_word(&b->format, l_k);
662 const u64 *r = high_word(&b->format, r_k);
663 unsigned nr_key_bits = b->nr_key_bits;
664 unsigned word_bits = 64 - high_bit_offset;
667 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
669 /* for big endian, skip past header */
670 l_v = *l & (~0ULL >> high_bit_offset);
671 r_v = *r & (~0ULL >> high_bit_offset);
/* walk word by word from most significant; word-advance lines are elided */
673 while (nr_key_bits) {
/* last (partial) word: drop the bits below the key */
674 if (nr_key_bits < word_bits) {
675 l_v >>= word_bits - nr_key_bits;
676 r_v >>= word_bits - nr_key_bits;
679 nr_key_bits -= word_bits;
/* position of the highest set bit of the XOR, rebased to key bit index */
683 return fls64(l_v ^ r_v) - 1 + nr_key_bits;
698 * Bits are indexed from 0 - return is [0, nr_key_bits)
/* Find the least significant set bit of packed key @k (its trailing-zero
 * count in key-bit indexing).  Word-walk lines are partly elided here. */
701 unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k)
703 const u64 *p = high_word(&b->format, k);
704 unsigned nr_key_bits = b->nr_key_bits;
705 unsigned ret = 0, offset;
707 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
/* compute the bit offset of the key's low end within its last word */
709 offset = nr_key_bits;
710 while (offset > 64) {
715 offset = 64 - offset;
717 while (nr_key_bits) {
718 unsigned bits = nr_key_bits + offset < 64
/* mask selects only the key bits present in the current word */
722 u64 mask = (~0ULL >> (64 - bits)) << offset;
725 return ret + __ffs64(*p & mask) - offset;
/*
 * x86-64 hand-written multiword big-endian-order comparison of the top
 * @nr_key_bits of @l and @r.  Most of the asm body is elided in this
 * excerpt; only the operand constraints are visible.
 */
738 static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
739 unsigned nr_key_bits)
744 /* we shouldn't need asm for this, but gcc is being retarded: */
746 asm(".intel_syntax noprefix;"
/* step both pointers back one word per iteration (keys compared high->low) */
758 "lea rdi, [rdi - 8];"
759 "lea rsi, [rsi - 8];"
774 ".att_syntax prefix;"
/* outputs: clobbered pointer/count regs plus the comparison result in rax */
775 : "=&D" (d0), "=&S" (d1), "=&d" (d2), "=&c" (d3), "=&a" (cmp)
776 : "0" (l), "1" (r), "3" (nr_key_bits)
777 : "r8", "r9", "cc", "memory");
/* Byte-emitter macros for the JIT unpack compiler below: I(x) appends one
 * machine-code byte at *out, In() append n bytes. */
782 #define I(_x) (*(out)++ = (_x))
784 #define I2(i0, i1) (I1(i0), I(i1))
785 #define I3(i0, i1, i2) (I2(i0, i1), I(i2))
786 #define I4(i0, i1, i2, i3) (I3(i0, i1, i2), I(i3))
787 #define I5(i0, i1, i2, i3, i4) (I4(i0, i1, i2, i3), I(i4))
/*
 * compile_bkey_field - JIT-emit x86-64 machine code that extracts one packed
 * field and stores it (offset-rebased) into the destination struct bkey at
 * @dst_offset.  Calling convention: rdi = dst (unpacked key), rsi = src
 * (packed key).  Returns the advanced output pointer.
 *
 * NOTE(review): many emitted-instruction lines are elided in this excerpt;
 * comments describe only the visible encodings.
 */
789 static u8 *compile_bkey_field(const struct bkey_format *format, u8 *out,
790 enum bch_bkey_fields field,
791 unsigned dst_offset, unsigned dst_size,
794 unsigned byte = format->key_u64s * sizeof(u64);
795 unsigned bits = format->bits_per_field[field];
796 u64 offset = format->field_offset[field];
797 unsigned i, bit_offset = 0;
/* constant field: no packed bits, the value is just the format offset */
800 if (!bits && !offset) {
811 /* just return offset: */
/* offset won't fit in a sign-extended imm32: store it as two 32-bit halves */
815 if (offset > S32_MAX) {
816 /* mov [rdi + dst_offset], offset */
817 I3(0xc7, 0x47, dst_offset);
818 memcpy(out, &offset, 4);
821 I3(0xc7, 0x47, dst_offset + 4);
822 memcpy(out, (void *) &offset + 4, 4);
825 /* mov [rdi + dst_offset], offset */
/* 64-bit store of a sign-extended imm32 (REX.W prefix 0x48) */
827 I4(0x48, 0xc7, 0x47, dst_offset);
828 memcpy(out, &offset, 4);
833 /* mov [rdi + dst_offset], offset */
834 I3(0xc7, 0x47, dst_offset);
835 memcpy(out, &offset, 4);
/* locate the field: bit position from the high end, then convert to a
 * byte offset from the start of the packed key */
845 for (i = 0; i <= field; i++)
846 bit_offset += format->bits_per_field[i];
848 byte -= DIV_ROUND_UP(bit_offset, 8);
849 bit_offset = round_up(bit_offset, 8) - bit_offset;
/* byte-aligned 8/16-bit fields: a single zero-extending load suffices */
853 if (bit_offset == 0 && bits == 8) {
854 /* movzx eax, BYTE PTR [rsi + imm8] */
855 I4(0x0f, 0xb6, 0x46, byte);
856 } else if (bit_offset == 0 && bits == 16) {
857 /* movzx eax, WORD PTR [rsi + imm8] */
858 I4(0x0f, 0xb7, 0x46, byte);
/* field fits in 32 bits: load, shift down, mask */
859 } else if (bit_offset + bits <= 32) {
860 /* mov eax, [rsi + imm8] */
861 I3(0x8b, 0x46, byte);
/* shr eax, bit_offset */
865 I3(0xc1, 0xe8, bit_offset);
868 if (bit_offset + bits < 32) {
869 unsigned mask = ~0U >> (32 - bits);
873 memcpy(out, &mask, 4);
/* field fits in one 64-bit word: shift left then right to isolate it */
876 } else if (bit_offset + bits <= 64) {
877 /* mov rax, [rsi + imm8] */
878 I4(0x48, 0x8b, 0x46, byte);
880 shl = 64 - bit_offset - bits;
881 shr = bit_offset + shl;
/* shl rax, shl */
885 I4(0x48, 0xc1, 0xe0, shl);
/* shr rax, shr */
890 I4(0x48, 0xc1, 0xe8, shr);
/* field straddles two words: combine low bits from rax with high bits
 * loaded into edx/rdx */
893 /* mov rax, [rsi + byte] */
894 I4(0x48, 0x8b, 0x46, byte);
896 /* mov edx, [rsi + byte + 8] */
897 I3(0x8b, 0x56, byte + 8);
899 /* bits from next word: */
900 shr = bit_offset + bits - 64;
901 BUG_ON(shr > bit_offset);
903 /* shr rax, bit_offset */
904 I4(0x48, 0xc1, 0xe8, shr);
/* shl rdx, 64 - shr */
907 I4(0x48, 0xc1, 0xe2, 64 - shr);
/* or rax, rdx */
910 I3(0x48, 0x09, 0xd0);
912 shr = bit_offset - shr;
916 I4(0x48, 0xc1, 0xe8, shr);
/* rebase: add the format's field offset back to the extracted value */
921 if (offset > S32_MAX) {
924 memcpy(out, &offset, 8);
/* add rax, rdx */
927 I3(0x48, 0x01, 0xd0);
928 } else if (offset + (~0ULL >> (64 - bits)) > U32_MAX) {
931 memcpy(out, &offset, 4);
936 memcpy(out, &offset, 4);
942 /* mov [rdi + dst_offset], rax */
943 I4(0x48, 0x89, 0x47, dst_offset);
946 /* mov [rdi + dst_offset], eax */
947 I3(0x89, 0x47, dst_offset);
/*
 * bch2_compile_bkey_format - JIT-compile an unpack function specialized for
 * @format into the buffer at @_out.  Emits header-fixup code, then one
 * compile_bkey_field() sequence per field, and returns the code size in
 * bytes.  (Prologue/epilogue emission lines are elided in this excerpt.)
 */
956 int bch2_compile_bkey_format(const struct bkey_format *format, void *_out)
958 bool eax_zeroed = false;
962 * rdi: dst - unpacked key
963 * rsi: src - packed key
966 /* k->u64s, k->format, k->type */
971 /* add eax, BKEY_U64s - format->key_u64s */
/* adjust u64s for the size difference between packed and unpacked keys,
 * and set format to KEY_FORMAT_CURRENT in the same imm32 */
972 I5(0x05, BKEY_U64s - format->key_u64s, KEY_FORMAT_CURRENT, 0, 0);
974 /* and eax, imm32: mask out k->pad: */
975 I5(0x25, 0xff, 0xff, 0xff, 0);
/* emit extraction code for each field via the x-macro */
980 #define x(id, field) \
981 out = compile_bkey_field(format, out, id, \
982 offsetof(struct bkey, field), \
983 sizeof(((struct bkey *) NULL)->field), \
/* return the number of code bytes emitted */
991 return (void *) out - _out;
/*
 * Portable C fallback for the multiword key-bit comparison (used when the
 * x86-64 asm version above is not compiled in).  Word-loop lines are
 * partly elided in this excerpt.
 */
995 static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
996 unsigned nr_key_bits)
1003 /* for big endian, skip past header */
1004 nr_key_bits += high_bit_offset;
1005 l_v = *l & (~0ULL >> high_bit_offset);
1006 r_v = *r & (~0ULL >> high_bit_offset);
/* final (partial) word: discard bits below the key before comparing */
1009 if (nr_key_bits < 64) {
1010 l_v >>= 64 - nr_key_bits;
1011 r_v >>= 64 - nr_key_bits;
/* classic three-way result on first differing word */
1018 return l_v < r_v ? -1 : 1;
/*
 * Compare two keys that are both packed in @b's format, directly on their
 * packed bit representation (no unpack needed).
 */
1033 int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l,
1034 const struct bkey_packed *r,
1035 const struct btree *b)
1037 const struct bkey_format *f = &b->format;
1040 EBUG_ON(!bkey_packed(l) || !bkey_packed(r));
1041 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
1043 ret = __bkey_cmp_bits(high_word(f, l),
/* debug: packed comparison must agree with comparing the unpacked positions */
1047 EBUG_ON(ret != bkey_cmp(bkey_unpack_key_format_checked(b, l).p,
1048 bkey_unpack_key_format_checked(b, r).p));
/* Compare a packed key @l against an unpacked position @r by unpacking
 * just l's position. */
1053 int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
1054 const struct bkey_packed *l,
1055 const struct bpos *r)
1057 return bkey_cmp(bkey_unpack_pos_format_checked(b, l), *r);
/*
 * General key comparison: dispatch on which of @l/@r are packed, using the
 * cheapest available comparison for each combination.
 */
1061 int __bch2_bkey_cmp_packed(const struct bkey_packed *l,
1062 const struct bkey_packed *r,
1063 const struct btree *b)
1065 int packed = bkey_lr_packed(l, r);
/* common fast path: both packed, compare bit patterns directly */
1067 if (likely(packed == BKEY_PACKED_BOTH))
1068 return __bch2_bkey_cmp_packed_format_checked(l, r, b);
1071 case BKEY_PACKED_NONE:
/* neither packed: both are really struct bkey, compare positions */
1072 return bkey_cmp(((struct bkey *) l)->p,
1073 ((struct bkey *) r)->p);
1074 case BKEY_PACKED_LEFT:
1075 return __bch2_bkey_cmp_left_packed_format_checked(b,
1076 (struct bkey_packed *) l,
1077 &((struct bkey *) r)->p);
1078 case BKEY_PACKED_RIGHT:
/* mirrored case: negate the left-packed comparison */
1079 return -__bch2_bkey_cmp_left_packed_format_checked(b,
1080 (struct bkey_packed *) r,
1081 &((struct bkey *) l)->p);
/* Compare possibly-packed @l against position @r; skip unpacking when @l
 * turns out to already be in the current (unpacked) format. */
1088 int __bch2_bkey_cmp_left_packed(const struct btree *b,
1089 const struct bkey_packed *l,
1090 const struct bpos *r)
1092 const struct bkey *l_unpacked;
1094 return unlikely(l_unpacked = packed_to_bkey_c(l))
1095 ? bkey_cmp(l_unpacked->p, *r)
1096 : __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
/* Byte-swap a struct bpos in place (endianness conversion); the swap loop
 * itself is elided in this excerpt. */
1099 void bch2_bpos_swab(struct bpos *p)
/* h starts at the last byte of the struct */
1102 u8 *h = ((u8 *) &p[1]) - 1;
/* Byte-swap the key portion of @k in place; if @k is unpacked, use the
 * current format's layout instead of the btree's format @_f. */
1111 void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
1113 const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
1114 u8 *l = k->key_start;
1115 u8 *h = (u8 *) (k->_data + f->key_u64s) - 1;
1124 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * bch2_bkey_pack_test - debug self-test: stream a fixed key through the
 * unpack/pack cursors field by field, checking each extracted value against
 * the struct member, then verify the whole key packs with a test format.
 */
1125 void bch2_bkey_pack_test(void)
1127 struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0);
1128 struct bkey_packed p;
1130 struct bkey_format test_format = {
1132 .nr_fields = BKEY_NR_FIELDS,
/* read t via the current (unpacked) format, write into the test format */
1139 struct unpack_state in_s =
1140 unpack_state_init(&bch2_bkey_format_current, (void *) &t);
1141 struct pack_state out_s = pack_state_init(&test_format, &p);
1144 for (i = 0; i < out_s.format->nr_fields; i++) {
1145 u64 a, v = get_inc_field(&in_s, i);
/* map field id i to the corresponding struct bkey member via the x-macro */
1148 #define x(id, field) case id: a = t.field; break;
1156 panic("got %llu actual %llu i %u\n", v, a, i);
1158 if (!set_inc_field(&out_s, i, v))
1159 panic("failed at %u\n", i);
/* end-to-end: the full pack path must also succeed for this key/format */
1162 BUG_ON(!bch2_bkey_pack_key(&p, &t, &test_format));