2 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
4 #include <linux/kernel.h>
10 const struct bkey_format bch_bkey_format_current = BKEY_FORMAT_CURRENT;
12 struct bkey __bkey_unpack_key(const struct bkey_format *,
13 const struct bkey_packed *);
/*
 * bch_to_binary - render @nr_bits of the key bits at @p into @out as
 * '0'/'1' characters, starting at high_bit_offset within the first word.
 * NOTE(review): interior lines of this function are missing from this
 * chunk (separator/advance logic not visible); code left untouched.
 */
15 void bch_to_binary(char *out, const u64 *p, unsigned nr_bits)
17 unsigned bit = high_bit_offset, done = 0;
/* presumably emits a group separator every 8 bits — body missing, confirm */
21 if (done && !(done % 8))
23 *out++ = *p & (1ULL << (63 - bit)) ? '1' : '0';
26 if (done == nr_bits) {
/*
 * bch_bkey_pack_verify - debug-only check that @packed round-trips:
 * unpacking it with @format must reproduce @unpacked exactly, else panic
 * with both keys in text and binary form. Compiled to a no-op stub when
 * CONFIG_BCACHE_DEBUG is off.
 */
37 #ifdef CONFIG_BCACHE_DEBUG
39 static void bch_bkey_pack_verify(const struct bkey_packed *packed,
40 const struct bkey *unpacked,
41 const struct bkey_format *format)
45 BUG_ON(bkeyp_val_u64s(format, packed) !=
46 bkey_val_u64s(unpacked));
48 BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
50 tmp = __bkey_unpack_key(format, packed);
52 if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
53 char buf1[160], buf2[160];
54 char buf3[160], buf4[160];
56 bch_bkey_to_text(buf1, sizeof(buf1), unpacked);
57 bch_bkey_to_text(buf2, sizeof(buf2), &tmp);
58 bch_to_binary(buf3, (void *) unpacked, 80);
59 bch_to_binary(buf4, high_word(format, packed), 80);
61 panic("keys differ: format u64s %u fields %u %u %u %u %u\n%s\n%s\n%s\n%s\n",
63 format->bits_per_field[0],
64 format->bits_per_field[1],
65 format->bits_per_field[2],
66 format->bits_per_field[3],
67 format->bits_per_field[4],
68 buf1, buf2, buf3, buf4);
/* non-debug builds: verification compiles away entirely */
73 static inline void bch_bkey_pack_verify(const struct bkey_packed *packed,
74 const struct bkey *unpacked,
75 const struct bkey_format *format) {}
/*
 * bch_bkey_to_text - format an unpacked bkey's header fields (u64s, type,
 * pos, snapshot, size, version) into @buf, bounded by @size via scnprintf.
 * Only legal on unpacked keys (BUG_ON otherwise). The switch on key type
 * below is missing its per-type bodies in this chunk.
 */
78 int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
80 char *out = buf, *end = buf + size;
/* p() appends to out, never writing past end */
82 #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
84 p("u64s %u type %u %llu:%llu snap %u len %u ver %u",
85 k->u64s, k->type, k->p.inode, k->p.offset,
86 k->p.snapshot, k->size, k->version);
88 BUG_ON(bkey_packed(k));
91 case KEY_TYPE_DELETED:
94 case KEY_TYPE_DISCARD:
100 case KEY_TYPE_COOKIE:
110 const struct bkey_format *format;
111 unsigned bits; /* bits remaining in current word */
112 u64 w; /* current word */
113 u64 *p; /* pointer to next word */
/*
 * pack_state_init - start a packing cursor at the high (most significant)
 * word of @k, with 64 - high_bit_offset bits available in the first word.
 */
117 static struct pack_state pack_state_init(const struct bkey_format *format,
118 struct bkey_packed *k)
120 u64 *p = high_word(format, k);
122 return (struct pack_state) {
124 .bits = 64 - high_bit_offset,
/*
 * pack_state_finish - flush the partially filled current word back into
 * @k; asserts the cursor still points inside k's key area.
 */
131 static void pack_state_finish(struct pack_state *state,
132 struct bkey_packed *k)
134 EBUG_ON(state->p < k->_data);
135 EBUG_ON(state->p >= k->_data + state->format->key_u64s);
137 *state->p = state->w;
/* Read-side cursor for consuming bit fields from a packed key. */
140 struct unpack_state {
141 const struct bkey_format *format;
142 unsigned bits; /* bits remaining in current word */
143 u64 w; /* current word */
144 const u64 *p; /* pointer to next word */
/*
 * unpack_state_init - start an unpacking cursor at the high word of @k,
 * pre-shifting past the header bits (high_bit_offset).
 */
148 static struct unpack_state unpack_state_init(const struct bkey_format *format,
149 const struct bkey_packed *k)
151 const u64 *p = high_word(format, k);
153 return (struct unpack_state) {
155 .bits = 64 - high_bit_offset,
156 .w = *p << high_bit_offset,
/*
 * get_inc_field - extract the next field from the unpack cursor, spanning
 * a word boundary when needed, and (per the offset read below) presumably
 * re-bias it by the format's field_offset — the final combine/return lines
 * are missing from this chunk; confirm against the full source.
 */
162 static u64 get_inc_field(struct unpack_state *state, unsigned field)
164 unsigned bits = state->format->bits_per_field[field];
165 u64 v = 0, offset = le64_to_cpu(state->format->field_offset[field]);
167 if (bits >= state->bits) {
168 v = state->w >> (64 - bits);
171 state->p = next_word(state->p);
172 state->w = *state->p;
176 /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
177 v |= (state->w >> 1) >> (63 - bits);
/*
 * set_inc_field - append field @v (biased by field_offset) to the pack
 * cursor; returns false when the value doesn't fit in the field's bits.
 * The range-check and return lines are missing from this chunk.
 */
185 static bool set_inc_field(struct pack_state *state, unsigned field, u64 v)
187 unsigned bits = state->format->bits_per_field[field];
188 u64 offset = le64_to_cpu(state->format->field_offset[field]);
/* field straddles a word boundary: emit high part, advance to next word */
198 if (bits > state->bits) {
200 /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
201 state->w |= (v >> 1) >> (bits - 1);
203 *state->p = state->w;
204 state->p = next_word(state->p);
210 state->w |= v << state->bits;
/*
 * bch_bkey_transform_key - re-pack the key bits of @in (format @in_f) into
 * @out using format @out_f, field by field, without touching the value.
 */
216 * Note: does NOT set out->format (we don't know what it should be here!)
218 * Also: doesn't work on extents - it doesn't preserve the invariant that
219 * if k is packed bkey_start_pos(k) will successfully pack
221 static bool bch_bkey_transform_key(const struct bkey_format *out_f,
222 struct bkey_packed *out,
223 const struct bkey_format *in_f,
224 const struct bkey_packed *in)
226 struct pack_state out_s = pack_state_init(out_f, out);
227 struct unpack_state in_s = unpack_state_init(in_f, in);
232 for (i = 0; i < BKEY_NR_FIELDS; i++)
233 if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
236 /* Can't happen because the val would be too big to unpack: */
237 EBUG_ON(in->u64s - in_f->key_u64s + out_f->key_u64s > U8_MAX);
239 pack_state_finish(&out_s, out);
240 out->u64s = out_f->key_u64s + in->u64s - in_f->key_u64s;
241 out->needs_whiteout = in->needs_whiteout;
242 out->type = in->type;
/*
 * bch_bkey_transform - transform the key bits via bch_bkey_transform_key(),
 * then copy the (unchanged) value u64s across after the new key area.
 */
247 bool bch_bkey_transform(const struct bkey_format *out_f,
248 struct bkey_packed *out,
249 const struct bkey_format *in_f,
250 const struct bkey_packed *in)
252 if (!bch_bkey_transform_key(out_f, out, in_f, in))
255 memcpy_u64s((u64 *) out + out_f->key_u64s,
256 (u64 *) in + in_f->key_u64s,
257 (in->u64s - in_f->key_u64s));
/*
 * __bkey_unpack_key - decode a packed key into a struct bkey, field by
 * field, in canonical field order (inode, offset, snapshot, size, version).
 * Fallback C implementation (a compiled variant may exist elsewhere).
 */
261 struct bkey __bkey_unpack_key(const struct bkey_format *format,
262 const struct bkey_packed *in)
264 struct unpack_state state = unpack_state_init(format, in);
267 EBUG_ON(format->nr_fields != 5);
268 EBUG_ON(in->u64s < format->key_u64s);
269 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
270 EBUG_ON(in->u64s - format->key_u64s + BKEY_U64s > U8_MAX);
/* value size is preserved; only the key header expands to BKEY_U64s */
272 out.u64s = BKEY_U64s + in->u64s - format->key_u64s;
273 out.format = KEY_FORMAT_CURRENT;
274 out.needs_whiteout = in->needs_whiteout;
277 out.p.inode = get_inc_field(&state, BKEY_FIELD_INODE);
278 out.p.offset = get_inc_field(&state, BKEY_FIELD_OFFSET);
279 out.p.snapshot = get_inc_field(&state, BKEY_FIELD_SNAPSHOT);
280 out.size = get_inc_field(&state, BKEY_FIELD_SIZE);
281 out.version = get_inc_field(&state, BKEY_FIELD_VERSION);
/*
 * __bkey_unpack_pos - decode only the position (inode:offset:snapshot) of
 * a packed key; compiled only when no machine-generated unpacker exists.
 */
286 #ifndef HAVE_BCACHE_COMPILED_UNPACK
287 struct bpos __bkey_unpack_pos(const struct bkey_format *format,
288 const struct bkey_packed *in)
290 struct unpack_state state = unpack_state_init(format, in);
293 EBUG_ON(format->nr_fields != 5);
294 EBUG_ON(in->u64s < format->key_u64s);
295 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
297 out.inode = get_inc_field(&state, BKEY_FIELD_INODE);
298 out.offset = get_inc_field(&state, BKEY_FIELD_OFFSET);
299 out.snapshot = get_inc_field(&state, BKEY_FIELD_SNAPSHOT);
/*
 * bkey_pack_key - pack @in's key fields into @out per @format; returns
 * false (via the missing early-return lines) when a field doesn't fit.
 */
306 * bkey_pack_key -- pack just the key, not the value
308 bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
309 const struct bkey_format *format)
311 struct pack_state state = pack_state_init(format, out);
313 EBUG_ON((void *) in == (void *) out);
314 EBUG_ON(format->nr_fields != 5);
315 EBUG_ON(in->format != KEY_FORMAT_CURRENT);
319 if (!set_inc_field(&state, BKEY_FIELD_INODE, in->p.inode) ||
320 !set_inc_field(&state, BKEY_FIELD_OFFSET, in->p.offset) ||
321 !set_inc_field(&state, BKEY_FIELD_SNAPSHOT, in->p.snapshot) ||
322 !set_inc_field(&state, BKEY_FIELD_SIZE, in->size) ||
323 !set_inc_field(&state, BKEY_FIELD_VERSION, in->version))
327 * Extents - we have to guarantee that if an extent is packed, a trimmed
328 * version will also pack:
/* NOTE(review): compares against raw field_offset without le64_to_cpu,
 * unlike every other use in this file — benign on LE, verify on BE */
330 if (bkey_start_offset(in) < format->field_offset[BKEY_FIELD_OFFSET])
333 pack_state_finish(&state, out);
334 out->u64s = format->key_u64s + in->u64s - BKEY_U64s;
335 out->format = KEY_FORMAT_LOCAL_BTREE;
336 out->needs_whiteout = in->needs_whiteout;
337 out->type = in->type;
339 bch_bkey_pack_verify(out, in, format);
/*
 * NOTE(review): these duplicate __bkey_unpack_key()/bkey_pack_key() above
 * and the lead-in comment calls them "alternate implementations" — they are
 * presumably inside a disabled (#if 0) region whose guard lines are missing
 * from this chunk; confirm before assuming both definitions are compiled.
 */
344 * Alternate implementations using bch_bkey_transform_key() - unfortunately, too
348 struct bkey __bkey_unpack_key(const struct bkey_format *format,
349 const struct bkey_packed *in)
354 EBUG_ON(format->nr_fields != 5);
355 EBUG_ON(in->u64s < format->key_u64s);
356 EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
358 s = bch_bkey_transform_key(&bch_bkey_format_current, (void *) &out,
362 out.format = KEY_FORMAT_CURRENT;
367 bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
368 const struct bkey_format *format)
370 EBUG_ON(format->nr_fields != 5);
371 EBUG_ON(in->format != KEY_FORMAT_CURRENT);
373 if (!bch_bkey_transform_key(format, out,
374 &bch_bkey_format_current, (void *) in))
377 out->format = KEY_FORMAT_LOCAL_BTREE;
379 bch_bkey_pack_verify(out, in, format);
/*
 * bkey_unpack - unpack key header into dst->k, then copy the value u64s
 * from the packed source (copy call partially missing from this chunk).
 */
385 * bkey_unpack -- unpack the key and the value
387 void bkey_unpack(const struct btree *b, struct bkey_i *dst,
388 const struct bkey_packed *src)
390 dst->k = bkey_unpack_key(b, src);
393 bkeyp_val(&b->format, src),
394 bkeyp_val_u64s(&b->format, src));
/*
 * bkey_pack - pack key into a temporary, move the value into place first
 * (memmove_u64s: src/dst may overlap when packing in place), then copy
 * the packed key over the front. Returns false if the key won't pack.
 */
398 * bkey_pack -- pack the key and the value
400 bool bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
401 const struct bkey_format *format)
403 struct bkey_packed tmp;
405 if (!bkey_pack_key(&tmp, &in->k, format))
408 memmove_u64s((u64 *) out + format->key_u64s,
410 bkey_val_u64s(&in->k));
411 memcpy_u64s(out, &tmp, format->key_u64s);
/*
 * set_inc_field_lossy - like set_inc_field() but clamps an oversized value
 * to the field's maximum (all ones) instead of failing; used by
 * bkey_pack_pos_lossy() where a <= approximation is acceptable.
 */
417 static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
419 unsigned bits = state->format->bits_per_field[field];
420 u64 offset = le64_to_cpu(state->format->field_offset[field]);
/* saturate: value too wide for the field -> clamp to max representable */
426 if (fls64(v) > bits) {
427 v = ~(~0ULL << bits);
431 if (bits > state->bits) {
433 state->w |= (v >> 1) >> (bits - 1);
435 *state->p = state->w;
436 state->p = next_word(state->p);
442 state->w |= v << state->bits;
/*
 * bkey_packed_successor - debug helper: compute the next-larger packed key
 * after @k by incrementing the key bits as one big-endian integer with
 * carry propagation across words. Return path missing from this chunk.
 */
447 #ifdef CONFIG_BCACHE_DEBUG
448 static bool bkey_packed_successor(struct bkey_packed *out,
449 const struct btree *b,
450 struct bkey_packed k)
452 const struct bkey_format *f = &b->format;
453 unsigned nr_key_bits = b->nr_key_bits;
454 unsigned first_bit, offset;
457 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
/* start at the least significant key bit */
464 first_bit = high_bit_offset + nr_key_bits - 1;
465 p = nth_word(high_word(f, out), first_bit >> 6);
466 offset = 63 - (first_bit & 63);
468 while (nr_key_bits) {
469 unsigned bits = min(64 - offset, nr_key_bits);
470 u64 mask = (~0ULL >> (64 - bits)) << offset;
/* no carry out of this word: increment and we're done */
472 if ((*p & mask) != mask) {
473 *p += 1ULL << offset;
474 EBUG_ON(bkey_cmp_packed(b, out, &k) <= 0);
/*
 * bkey_pack_pos_lossy - pack @in's position, allowing the result to
 * compare smaller; fields below the format's offset fail or saturate and
 * all lower-priority fields are then forced to their maxima so the packed
 * key stays <= the original.
 */
489 * Returns a packed key that compares <= in
491 * This is used in bset_search_tree(), where we need a packed pos in order to be
492 * able to compare against the keys in the auxiliary search tree - and it's
493 * legal to use a packed pos that isn't equivalent to the original pos,
494 * _provided_ it compares <= to the original pos.
496 enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
498 const struct btree *b)
500 const struct bkey_format *f = &b->format;
501 struct pack_state state = pack_state_init(f, out);
502 #ifdef CONFIG_BCACHE_DEBUG
503 struct bpos orig = in;
/* snapshot below the format's floor: step down to the previous offset,
 * maxing snapshot (guard lines missing from this chunk) */
509 if (unlikely(in.snapshot <
510 le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
513 return BKEY_PACK_POS_FAIL;
514 in.snapshot = KEY_SNAPSHOT_MAX;
518 if (unlikely(in.offset <
519 le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]))) {
521 return BKEY_PACK_POS_FAIL;
522 in.offset = KEY_OFFSET_MAX;
523 in.snapshot = KEY_SNAPSHOT_MAX;
/* inode has no lower field to borrow from: below the floor is fatal */
527 if (unlikely(in.inode <
528 le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
529 return BKEY_PACK_POS_FAIL;
531 if (!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode)) {
532 in.offset = KEY_OFFSET_MAX;
533 in.snapshot = KEY_SNAPSHOT_MAX;
537 if (!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset)) {
538 in.snapshot = KEY_SNAPSHOT_MAX;
542 if (!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot))
545 pack_state_finish(&state, out);
546 out->u64s = f->key_u64s;
547 out->format = KEY_FORMAT_LOCAL_BTREE;
548 out->type = KEY_TYPE_DELETED;
/* debug: exact pack must equal orig; lossy pack must be < orig and its
 * successor must not still be < orig */
550 #ifdef CONFIG_BCACHE_DEBUG
552 BUG_ON(bkey_cmp_left_packed(b, out, &orig));
554 struct bkey_packed successor;
556 BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0);
557 BUG_ON(bkey_packed_successor(&successor, b, *out) &&
558 bkey_cmp_left_packed(b, &successor, &orig) < 0);
562 return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER;
/*
 * bch_bkey_format_init - reset a format-state accumulator: mins start at
 * U64_MAX, maxes at 0 (assignment line missing here), except SIZE's min is
 * pinned to 0 so a zero-size key always packs.
 */
565 void bch_bkey_format_init(struct bkey_format_state *s)
569 for (i = 0; i < ARRAY_SIZE(s->field_min); i++)
570 s->field_min[i] = U64_MAX;
572 for (i = 0; i < ARRAY_SIZE(s->field_max); i++)
575 /* Make sure we can store a size of 0: */
576 s->field_min[BKEY_FIELD_SIZE] = 0;
/* Widen the accumulated [min, max] range of @field to include @v. */
579 static void __bkey_format_add(struct bkey_format_state *s,
580 unsigned field, u64 v)
582 s->field_min[field] = min(s->field_min[field], v);
583 s->field_max[field] = max(s->field_max[field], v);
587 * Changes @format so that @k can be successfully packed with @format
589 void bch_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
591 __bkey_format_add(s, BKEY_FIELD_INODE, k->p.inode);
592 __bkey_format_add(s, BKEY_FIELD_OFFSET, k->p.offset);
593 __bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
594 __bkey_format_add(s, BKEY_FIELD_SNAPSHOT, k->p.snapshot);
595 __bkey_format_add(s, BKEY_FIELD_SIZE, k->size);
596 __bkey_format_add(s, BKEY_FIELD_VERSION, k->version);
/*
 * bch_bkey_format_add_pos - fold a bare position into the format state;
 * relies on INODE/OFFSET/SNAPSHOT being consecutive field indices.
 */
599 void bch_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
603 __bkey_format_add(s, field++, p.inode);
604 __bkey_format_add(s, field++, p.offset);
605 __bkey_format_add(s, field++, p.snapshot);
/*
 * set_format_field - store one field's width and base offset, clamping the
 * offset so offset + field-max never wraps past U64_MAX.
 */
609 * We don't want it to be possible for the packed format to represent fields
610 * bigger than a u64... that will cause confusion and issues (like with
611 * bkey_packed_successor())
613 static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
614 unsigned bits, u64 offset)
616 offset = bits == 64 ? 0 : min(offset, U64_MAX - ((1ULL << bits) - 1));
618 f->bits_per_field[i] = bits;
619 f->field_offset[i] = cpu_to_le64(offset);
/*
 * bch_bkey_format_done - turn accumulated min/max ranges into a concrete
 * format: each field gets enough bits for (max - min) with min as its
 * offset, then spare bits in the final u64s are used to round field widths
 * up to byte boundaries. Result is validated under EBUG before return.
 */
622 struct bkey_format bch_bkey_format_done(struct bkey_format_state *s)
624 unsigned i, bits = KEY_PACKED_BITS_START;
625 struct bkey_format ret = {
626 .nr_fields = BKEY_NR_FIELDS,
629 for (i = 0; i < ARRAY_SIZE(s->field_min); i++) {
/* guard against a field that was never add()ed (min still U64_MAX) */
630 s->field_min[i] = min(s->field_min[i], s->field_max[i]);
632 set_format_field(&ret, i,
633 fls64(s->field_max[i] - s->field_min[i]),
636 bits += ret.bits_per_field[i];
639 ret.key_u64s = DIV_ROUND_UP(bits, 64);
641 /* if we have enough spare bits, round fields up to nearest byte */
642 bits = ret.key_u64s * 64 - bits;
644 for (i = 0; i < ARRAY_SIZE(ret.bits_per_field); i++) {
645 unsigned r = round_up(ret.bits_per_field[i], 8) -
646 ret.bits_per_field[i];
649 set_format_field(&ret, i,
650 ret.bits_per_field[i] + r,
651 le64_to_cpu(ret.field_offset[i]));
656 EBUG_ON(bch_bkey_format_validate(&ret));
/*
 * bch_bkey_format_validate - sanity-check an on-disk/in-memory format.
 * Returns NULL on success or a static English error string describing the
 * first problem found (field count, width, offset overflow, key_u64s).
 */
660 const char *bch_bkey_format_validate(struct bkey_format *f)
662 unsigned i, bits = KEY_PACKED_BITS_START;
664 if (f->nr_fields != BKEY_NR_FIELDS)
665 return "invalid format: incorrect number of fields";
667 for (i = 0; i < f->nr_fields; i++) {
668 u64 field_offset = le64_to_cpu(f->field_offset[i]);
670 if (f->bits_per_field[i] > 64)
671 return "invalid format: field too large";
674 (f->bits_per_field[i] == 64 ||
675 (field_offset + ((1ULL << f->bits_per_field[i]) - 1) <
677 return "invalid format: offset + bits overflow";
679 bits += f->bits_per_field[i];
682 if (f->key_u64s != DIV_ROUND_UP(bits, 64))
683 return "invalid format: incorrect key_u64s";
/*
 * bkey_greatest_differing_bit - index (from bit 0 = least significant key
 * bit) of the most significant bit where two packed keys differ; walks
 * word by word from the high end. Loop-advance lines missing here.
 */
689 * Most significant differing bit
690 * Bits are indexed from 0 - return is [0, nr_key_bits)
693 unsigned bkey_greatest_differing_bit(const struct btree *b,
694 const struct bkey_packed *l_k,
695 const struct bkey_packed *r_k)
697 const u64 *l = high_word(&b->format, l_k);
698 const u64 *r = high_word(&b->format, r_k);
699 unsigned nr_key_bits = b->nr_key_bits;
700 unsigned word_bits = 64 - high_bit_offset;
703 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
705 /* for big endian, skip past header */
706 l_v = *l & (~0ULL >> high_bit_offset);
707 r_v = *r & (~0ULL >> high_bit_offset);
709 while (nr_key_bits) {
/* last (partial) word: drop bits below the key */
710 if (nr_key_bits < word_bits) {
711 l_v >>= word_bits - nr_key_bits;
712 r_v >>= word_bits - nr_key_bits;
715 nr_key_bits -= word_bits;
719 return fls64(l_v ^ r_v) - 1 + nr_key_bits;
734 * Bits are indexed from 0 - return is [0, nr_key_bits)
737 unsigned bkey_ffs(const struct btree *b,
738 const struct bkey_packed *k)
740 const u64 *p = high_word(&b->format, k);
741 unsigned nr_key_bits = b->nr_key_bits;
742 unsigned ret = 0, offset;
744 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
746 offset = nr_key_bits;
747 while (offset > 64) {
752 offset = 64 - offset;
754 while (nr_key_bits) {
755 unsigned bits = nr_key_bits + offset < 64
759 u64 mask = (~0ULL >> (64 - bits)) << offset;
762 return ret + __ffs64(*p & mask) - offset;
/*
 * __bkey_cmp_bits (x86-64 asm variant) - compare nr_key_bits of two keys
 * as big multiword integers; hand-written because the compiler generated
 * poor code. Most asm body lines are missing from this chunk — the
 * clobbers show it uses r8/r9 and reads/writes memory.
 */
775 static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
776 unsigned nr_key_bits)
781 /* we shouldn't need asm for this, but gcc is being retarded: */
783 asm(".intel_syntax noprefix;"
/* walk both operands downward one u64 at a time */
795 "lea rdi, [rdi - 8];"
796 "lea rsi, [rsi - 8];"
811 ".att_syntax prefix;"
812 : "=&D" (d0), "=&S" (d1), "=&d" (d2), "=&c" (d3), "=&a" (cmp)
813 : "0" (l), "1" (r), "3" (nr_key_bits)
814 : "r8", "r9", "cc", "memory");
/* Byte-emitter macros for the JIT below: I(x) appends one opcode byte to
 * the 'out' cursor; I2..I5 chain multiple bytes (I1 defined on a line
 * missing from this chunk). */
819 #define I(_x) (*(out)++ = (_x))
821 #define I2(i0, i1) (I1(i0), I(i1))
822 #define I3(i0, i1, i2) (I2(i0, i1), I(i2))
823 #define I4(i0, i1, i2, i3) (I3(i0, i1, i2), I(i3))
824 #define I5(i0, i1, i2, i3, i4) (I4(i0, i1, i2, i3), I(i4))
/*
 * compile_bkey_field - emit x86-64 machine code that extracts one packed
 * field (rsi = packed key) and stores the unbiased value into the unpacked
 * key (rdi + dst_offset). Picks the cheapest load/shift/mask sequence for
 * the field's bit width and alignment; constant-only fields become plain
 * stores of the offset. Many emit lines are missing from this chunk, so
 * treat instruction sequences below as partial.
 * NOTE(review): 'offset' is read without le64_to_cpu here, unlike the
 * interpreted paths — confirm whether this JIT is x86 (LE)-only by design.
 */
826 static u8 *compile_bkey_field(const struct bkey_format *format, u8 *out,
827 enum bch_bkey_fields field,
828 unsigned dst_offset, unsigned dst_size,
831 unsigned byte = format->key_u64s * sizeof(u64);
832 unsigned bits = format->bits_per_field[field];
833 u64 offset = format->field_offset[field];
834 unsigned i, bit_offset = 0;
/* zero-width, zero-offset field: nothing to extract */
837 if (!bits && !offset) {
848 /* just return offset: */
852 if (offset > S32_MAX) {
853 /* mov [rdi + dst_offset], offset */
854 I3(0xc7, 0x47, dst_offset);
855 memcpy(out, &offset, 4);
858 I3(0xc7, 0x47, dst_offset + 4);
859 memcpy(out, (void *) &offset + 4, 4);
862 /* mov [rdi + dst_offset], offset */
864 I4(0x48, 0xc7, 0x47, dst_offset);
865 memcpy(out, &offset, 4);
870 /* mov [rdi + dst_offset], offset */
871 I3(0xc7, 0x47, dst_offset);
872 memcpy(out, &offset, 4);
/* locate the field: bit position from the high end, then byte address */
882 for (i = 0; i <= field; i++)
883 bit_offset += format->bits_per_field[i];
885 byte -= DIV_ROUND_UP(bit_offset, 8);
886 bit_offset = round_up(bit_offset, 8) - bit_offset;
890 if (bit_offset == 0 && bits == 8) {
891 /* movzx eax, BYTE PTR [rsi + imm8] */
892 I4(0x0f, 0xb6, 0x46, byte);
893 } else if (bit_offset == 0 && bits == 16) {
894 /* movzx eax, WORD PTR [rsi + imm8] */
895 I4(0x0f, 0xb7, 0x46, byte);
896 } else if (bit_offset + bits <= 32) {
897 /* mov eax, [rsi + imm8] */
898 I3(0x8b, 0x46, byte);
902 I3(0xc1, 0xe8, bit_offset);
905 if (bit_offset + bits < 32) {
906 unsigned mask = ~0U >> (32 - bits);
910 memcpy(out, &mask, 4);
913 } else if (bit_offset + bits <= 64) {
914 /* mov rax, [rsi + imm8] */
915 I4(0x48, 0x8b, 0x46, byte);
/* shift left then right to isolate the field within one u64 */
917 shl = 64 - bit_offset - bits;
918 shr = bit_offset + shl;
922 I4(0x48, 0xc1, 0xe0, shl);
927 I4(0x48, 0xc1, 0xe8, shr);
/* field straddles two u64s: combine rax (low word) and rdx (high word) */
930 /* mov rax, [rsi + byte] */
931 I4(0x48, 0x8b, 0x46, byte);
933 /* mov edx, [rsi + byte + 8] */
934 I3(0x8b, 0x56, byte + 8);
936 /* bits from next word: */
937 shr = bit_offset + bits - 64;
938 BUG_ON(shr > bit_offset);
940 /* shr rax, bit_offset */
941 I4(0x48, 0xc1, 0xe8, shr);
944 I4(0x48, 0xc1, 0xe2, 64 - shr);
947 I3(0x48, 0x09, 0xd0);
949 shr = bit_offset - shr;
953 I4(0x48, 0xc1, 0xe8, shr);
/* add the field's base offset back in, sized to fit the immediate */
958 if (offset > S32_MAX) {
961 memcpy(out, &offset, 8);
964 I3(0x48, 0x01, 0xd0);
965 } else if (offset + (~0ULL >> (64 - bits)) > U32_MAX) {
968 memcpy(out, &offset, 4);
973 memcpy(out, &offset, 4);
979 /* mov [rdi + dst_offset], rax */
980 I4(0x48, 0x89, 0x47, dst_offset);
983 /* mov [rdi + dst_offset], eax */
984 I3(0x89, 0x47, dst_offset);
/*
 * bch_compile_bkey_format - JIT a specialized unpack function for @format
 * into @_out: emits a header fixup (u64s adjusted, format/type set, pad
 * masked) then one compile_bkey_field() sequence per field, in canonical
 * order. Returns the number of bytes of code emitted.
 */
993 int bch_compile_bkey_format(const struct bkey_format *format, void *_out)
995 bool eax_zeroed = false;
999 * rdi: dst - unpacked key
1000 * rsi: src - packed key
1003 /* k->u64s, k->format, k->type */
1005 /* mov eax, [rsi] */
1008 /* add eax, BKEY_U64s - format->key_u64s */
1009 I5(0x05, BKEY_U64s - format->key_u64s, KEY_FORMAT_CURRENT, 0, 0);
1011 /* and eax, imm32: mask out k->pad: */
1012 I5(0x25, 0xff, 0xff, 0xff, 0);
1014 /* mov [rdi], eax */
1017 out = compile_bkey_field(format, out, BKEY_FIELD_INODE,
1018 offsetof(struct bkey, p.inode), 8,
1021 out = compile_bkey_field(format, out, BKEY_FIELD_OFFSET,
1022 offsetof(struct bkey, p.offset), 8,
1025 out = compile_bkey_field(format, out, BKEY_FIELD_SNAPSHOT,
1026 offsetof(struct bkey, p.snapshot), 4,
1029 out = compile_bkey_field(format, out, BKEY_FIELD_SIZE,
1030 offsetof(struct bkey, size), 4,
1033 out = compile_bkey_field(format, out, BKEY_FIELD_VERSION,
1034 offsetof(struct bkey, version), 4,
1040 return (void *) out - _out;
/*
 * __bkey_cmp_bits (portable variant) - compare nr_key_bits of two keys as
 * multiword big-endian-ordered integers; returns -1/0/1 style ordering
 * (loop/equality lines missing from this chunk).
 */
1044 static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
1045 unsigned nr_key_bits)
1052 /* for big endian, skip past header */
1053 nr_key_bits += high_bit_offset;
1054 l_v = *l & (~0ULL >> high_bit_offset);
1055 r_v = *r & (~0ULL >> high_bit_offset);
/* short key: everything fits in the first word */
1058 if (nr_key_bits < 64) {
1059 l_v >>= 64 - nr_key_bits;
1060 r_v >>= 64 - nr_key_bits;
1067 return l_v < r_v ? -1 : 1;
1082 * Would like to use this if we can make __bkey_cmp_bits() fast enough, it'll be
1083 * a decent reduction in code size
1086 static int bkey_cmp_verify(const struct bkey *l, const struct bkey *r)
1088 if (l->p.inode != r->p.inode)
1089 return l->p.inode < r->p.inode ? -1 : 1;
1091 if (l->p.offset != r->p.offset)
1092 return l->p.offset < r->p.offset ? -1 : 1;
1094 if (l->p.snapshot != r->p.snapshot)
1095 return l->p.snapshot < r->p.snapshot ? -1 : 1;
/*
 * bkey_cmp - compare two unpacked keys' positions via the bitwise
 * comparator over the inode/offset/snapshot bits, verified against the
 * field-by-field reference in debug builds.
 * NOTE(review): argument order to __bkey_cmp_bits here (bits first) differs
 * from the definitions above (pointers first) — likely sampling artifact,
 * but verify against the full source.
 */
1100 int bkey_cmp(const struct bkey *l, const struct bkey *r)
1104 EBUG_ON(bkey_packed(l) || bkey_packed(r));
1106 ret = __bkey_cmp_bits((sizeof(l->inode) +
1108 sizeof(l->snapshot)) * BITS_PER_BYTE,
1109 __high_word(BKEY_U64s, l),
1110 __high_word(BKEY_U64s, r));
1112 BUG_ON(ret != bkey_cmp_verify(l, r));
/*
 * __bkey_cmp_packed_format_checked - compare two keys that are both packed
 * in @b's format, directly on the packed bits; debug builds cross-check
 * against unpacking and comparing positions.
 */
1119 int __bkey_cmp_packed_format_checked(const struct bkey_packed *l,
1120 const struct bkey_packed *r,
1121 const struct btree *b)
1123 const struct bkey_format *f = &b->format;
1126 EBUG_ON(!bkey_packed(l) || !bkey_packed(r));
1127 EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
1129 ret = __bkey_cmp_bits(high_word(f, l),
1133 EBUG_ON(ret != bkey_cmp(bkey_unpack_key_format_checked(b, l).p,
1134 bkey_unpack_key_format_checked(b, r).p));
/* Compare a packed key (left) against an unpacked position (right) by
 * unpacking only the left key's position. */
1139 int __bkey_cmp_left_packed_format_checked(const struct btree *b,
1140 const struct bkey_packed *l,
1141 const struct bpos *r)
1143 return bkey_cmp(bkey_unpack_pos_format_checked(b, l), *r);
/*
 * __bkey_cmp_packed - compare two keys that may each be packed or unpacked,
 * dispatching on the packed/unpacked combination; both-packed is the fast
 * path, mixed cases funnel through the left-packed comparator (negated
 * when the packed key is on the right).
 */
1147 int __bkey_cmp_packed(const struct bkey_packed *l,
1148 const struct bkey_packed *r,
1149 const struct btree *b)
1151 int packed = bkey_lr_packed(l, r);
1153 if (likely(packed == BKEY_PACKED_BOTH))
1154 return __bkey_cmp_packed_format_checked(l, r, b);
1157 case BKEY_PACKED_NONE:
1158 return bkey_cmp(((struct bkey *) l)->p,
1159 ((struct bkey *) r)->p);
1160 case BKEY_PACKED_LEFT:
1161 return __bkey_cmp_left_packed_format_checked(b,
1162 (struct bkey_packed *) l,
1163 &((struct bkey *) r)->p);
1164 case BKEY_PACKED_RIGHT:
1165 return -__bkey_cmp_left_packed_format_checked(b,
1166 (struct bkey_packed *) r,
1167 &((struct bkey *) l)->p);
/*
 * bkey_cmp_left_packed - compare @l (packed or not) against position @r;
 * if @l is actually unpacked, compare positions directly, otherwise use
 * the format-checked packed comparator.
 */
1174 int bkey_cmp_left_packed(const struct btree *b,
1175 const struct bkey_packed *l, const struct bpos *r)
1177 const struct bkey *l_unpacked;
1179 return unlikely(l_unpacked = packed_to_bkey_c(l))
1180 ? bkey_cmp(l_unpacked->p, *r)
1181 : __bkey_cmp_left_packed_format_checked(b, l, r);
/*
 * bch_bpos_swab - byte-swap a struct bpos in place; @h starts at the last
 * byte of the struct (swap loop missing from this chunk).
 */
1184 void bch_bpos_swab(struct bpos *p)
1187 u8 *h = ((u8 *) &p[1]) - 1;
/*
 * bch_bkey_swab_key - byte-swap a key's key bytes in place, using the
 * btree format for packed keys and the current format otherwise; swaps
 * between key_start and the end of the key area (loop missing here).
 */
1196 void bch_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
1198 const struct bkey_format *f = bkey_packed(k) ? _f : &bch_bkey_format_current;
1199 u8 *l = k->key_start;
1200 u8 *h = (u8 *) (k->_data + f->key_u64s) - 1;
1209 #ifdef CONFIG_BCACHE_DEBUG
1210 void bkey_pack_test(void)
1212 struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0);
1213 struct bkey_packed p;
1215 struct bkey_format test_format = {
1224 struct unpack_state in_s =
1225 unpack_state_init(&bch_bkey_format_current, (void *) &t);
1226 struct pack_state out_s = pack_state_init(&test_format, &p);
1229 for (i = 0; i < out_s.format->nr_fields; i++) {
1230 u64 a, v = get_inc_field(&in_s, i);
1253 panic("got %llu actual %llu i %u\n", v, a, i);
1255 if (!set_inc_field(&out_s, i, v))
1256 panic("failed at %u\n", i);
1259 BUG_ON(!bkey_pack_key(&p, &t, &test_format));