+ ops->swab(k);
+}
+
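+/*
+ * Call the key type's normalize op, if it has one: returns true if the
+ * key can be dropped entirely (key types without a normalize op are
+ * never dropped).
+ */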
+bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
+{
+ const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
+
+ return ops->key_normalize
+ ? ops->key_normalize(c, k)
+ : false;
+}
+
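+/*
+ * Attempt to merge @r into @l: both keys must pass the generic
+ * mergability checks, the combined size must not overflow KEY_SIZE_MAX,
+ * the key type must implement key_merge, and merging must not have been
+ * disabled.
+ */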
+bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
+{
+ const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];
+
+ return bch2_bkey_maybe_mergable(l.k, r.k) &&
+ (u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
+ ops->key_merge &&
+ !bch2_key_merging_disabled &&
+ ops->key_merge(c, l, r);
+}
+
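+/*
+ * Pre-bkey_renumber metadata versions used per-btree key type numbers
+ * starting at 128; this table maps them to and from the current global
+ * KEY_TYPE_* numbering:
+ */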
+static const struct old_bkey_type {
+ u8 btree_node_type;
+ u8 old;
+ u8 new;
+} bkey_renumber_table[] = {
+ {BKEY_TYPE_btree, 128, KEY_TYPE_btree_ptr },
+ {BKEY_TYPE_extents, 128, KEY_TYPE_extent },
+ {BKEY_TYPE_extents, 129, KEY_TYPE_extent },
+ {BKEY_TYPE_extents, 130, KEY_TYPE_reservation },
+ {BKEY_TYPE_inodes, 128, KEY_TYPE_inode },
+ {BKEY_TYPE_inodes, 130, KEY_TYPE_inode_generation },
+ {BKEY_TYPE_dirents, 128, KEY_TYPE_dirent },
+ {BKEY_TYPE_dirents, 129, KEY_TYPE_hash_whiteout },
+ {BKEY_TYPE_xattrs, 128, KEY_TYPE_xattr },
+ {BKEY_TYPE_xattrs, 129, KEY_TYPE_hash_whiteout },
+ {BKEY_TYPE_alloc, 128, KEY_TYPE_alloc },
+ {BKEY_TYPE_quotas, 128, KEY_TYPE_quota },
+};
+
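+/*
+ * Translate a key's type number between the old per-btree numbering and
+ * the current one: old -> new when reading, new -> old when writing out
+ * the old format.
+ */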
+void bch2_bkey_renumber(enum btree_node_type btree_node_type,
+ struct bkey_packed *k,
+ int write)
+{
+ const struct old_bkey_type *i;
+
+ for (i = bkey_renumber_table;
+ i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
+ i++)
+ if (btree_node_type == i->btree_node_type &&
+ k->type == (write ? i->new : i->old)) {
+ k->type = write ? i->old : i->new;
+ break;
+ }
+}
+
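+/*
+ * Apply every on disk format compatibility fixup needed to read or
+ * write a key in an older metadata version: key endianness, key type
+ * renumbering, the inodes btree reindexing, the snapshot field, and
+ * byte order plus per-type fixups for the value.
+ */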
+void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
+ unsigned version, unsigned big_endian,
+ int write,
+ struct bkey_format *f,
+ struct bkey_packed *k)
+{
+ const struct bkey_ops *ops;
+ struct bkey uk;
+ struct bkey_s u;
+ unsigned nr_compat = 5;
+ int i;
+
+ /*
+ * Do these operations in reverse order in the write path:
+ */
+
+ for (i = 0; i < nr_compat; i++)
+ switch (!write ? i : nr_compat - 1 - i) {
+ case 0:
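+ /* Byte-swap the packed key itself: */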
+ if (big_endian != CPU_BIG_ENDIAN)
+ bch2_bkey_swab_key(f, k);
+ break;
+ case 1:
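+ /* Convert between old per-btree and current key type numbers: */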
+ if (version < bcachefs_metadata_version_bkey_renumber)
+ bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
+ break;
+ case 2:
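+ /*
+ * The inodes btree was changed to be indexed by the offset
+ * field instead of the inode field; swap the two, repacking
+ * through a format with the fields swapped if the key is
+ * packed:
+ */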
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_id == BTREE_ID_inodes) {
+ if (!bkey_packed(k)) {
+ struct bkey_i *u = packed_to_bkey(k);
+
+ swap(u->k.p.inode, u->k.p.offset);
+ } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
+ f->bits_per_field[BKEY_FIELD_OFFSET]) {
+ struct bkey_format tmp = *f, *in = f, *out = &tmp;
+
+ swap(tmp.bits_per_field[BKEY_FIELD_INODE],
+ tmp.bits_per_field[BKEY_FIELD_OFFSET]);
+ swap(tmp.field_offset[BKEY_FIELD_INODE],
+ tmp.field_offset[BKEY_FIELD_OFFSET]);
+
+ if (!write)
+ swap(in, out);
+
+ uk = __bch2_bkey_unpack_key(in, k);
+ swap(uk.p.inode, uk.p.offset);
+ BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
+ }
+ }
+ break;
+ case 3:
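+ /*
+ * Pre-snapshot versions have no snapshot field: fill it in
+ * when reading, clear it when writing the old format:
+ */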
+ if (version < bcachefs_metadata_version_snapshot &&
+ (level || btree_type_has_snapshots(btree_id))) {
+ struct bkey_i *u = packed_to_bkey(k);
+
+ if (u) {
+ u->k.p.snapshot = write
+ ? 0 : U32_MAX;
+ } else {
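+ /*
+ * The key is packed, and the format may not be able to
+ * represent 0 or U32_MAX: clamp to the range of snapshot
+ * values this format can encode:
+ */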
+ u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]);
+ u64 max_packed = min_packed +
+ ~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
+
+ uk = __bch2_bkey_unpack_key(f, k);
+ uk.p.snapshot = write
+ ? min_packed : min_t(u64, U32_MAX, max_packed);
+
+ BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
+ }
+ }
+
+ break;
+ case 4:
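+ /*
+ * Get an unpacked view of the key so the value can be
+ * byte-swapped and handed to the type's compat op:
+ */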
+ if (!bkey_packed(k)) {
+ u = bkey_i_to_s(packed_to_bkey(k));
+ } else {
+ uk = __bch2_bkey_unpack_key(f, k);
+ u.k = &uk;
+ u.v = bkeyp_val(f, k);
+ }
+
+ if (big_endian != CPU_BIG_ENDIAN)
+ bch2_bkey_swab_val(u);
+
+ ops = &bch2_bkey_ops[k->type];
+
+ if (ops->compat)
+ ops->compat(btree_id, version, big_endian, write, u);
+ break;
+ default:
+ BUG();
+ }