1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
5 #include "btree_types.h"
6 #include "alloc_background.h"
/*
 * Human-readable name for each key type, indexed by KEY_TYPE_* value.
 * Built by expanding an x-macro list; the list invocation and the
 * closing brace are not visible in this elided view.
 */
16 const char * const bch2_bkey_types[] = {
/* Each x(name, nr) entry stringifies the type name. */
17 #define x(name, nr) #name,
/*
 * .key_invalid hook for deleted/discard keys.  Body is elided here;
 * from usage it is expected to return an error string or NULL — TODO
 * confirm against the full source.
 */
23 static const char *deleted_key_invalid(const struct bch_fs *c,
/* Ops for KEY_TYPE_deleted: validation only, shares deleted_key_invalid. */
29 #define bch2_bkey_ops_deleted (struct bkey_ops) { \
30 .key_invalid = deleted_key_invalid, \
/* Ops for KEY_TYPE_discard: same validation as deleted keys. */
33 #define bch2_bkey_ops_discard (struct bkey_ops) { \
34 .key_invalid = deleted_key_invalid, \
/*
 * Shared .key_invalid hook for key types that must carry no value:
 * rejects any key whose value size is nonzero.  (Success path is not
 * visible in this elided view; presumably returns NULL — confirm.)
 */
37 static const char *empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k)
39 if (bkey_val_bytes(k.k))
40 return "value size should be zero";
/* Ops for KEY_TYPE_error: no value allowed. */
45 #define bch2_bkey_ops_error (struct bkey_ops) { \
46 .key_invalid = empty_val_key_invalid, \
/*
 * .key_invalid hook for KEY_TYPE_cookie: the value must be exactly a
 * struct bch_cookie.
 */
49 static const char *key_type_cookie_invalid(const struct bch_fs *c,
52 if (bkey_val_bytes(k.k) != sizeof(struct bch_cookie))
53 return "incorrect value size";
/* Ops for KEY_TYPE_cookie: fixed-size value check. */
58 #define bch2_bkey_ops_cookie (struct bkey_ops) { \
59 .key_invalid = key_type_cookie_invalid, \
/* Ops for KEY_TYPE_whiteout: whiteouts carry no value. */
62 #define bch2_bkey_ops_whiteout (struct bkey_ops) { \
63 .key_invalid = empty_val_key_invalid, \
/*
 * .key_invalid hook for KEY_TYPE_inline_data; body elided in this view.
 */
66 static const char *key_type_inline_data_invalid(const struct bch_fs *c,
/*
 * .val_to_text hook for inline data: prints only the payload size, not
 * the (arbitrary binary) contents.
 */
72 static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
75 pr_buf(out, "(%zu bytes)", bkey_val_bytes(k.k));
/* Ops for KEY_TYPE_inline_data: validation + size-only pretty printer. */
78 #define bch2_bkey_ops_inline_data (struct bkey_ops) { \
79 .key_invalid = key_type_inline_data_invalid, \
80 .val_to_text = key_type_inline_data_to_text, \
/*
 * Per-key-type dispatch table, indexed by KEY_TYPE_*.  Each entry is
 * one of the bch2_bkey_ops_<name> designated-initializer macros above,
 * expanded via the x-macro list (invocation elided from this view).
 */
83 static const struct bkey_ops bch2_bkey_ops[] = {
84 #define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
/*
 * Validate a key's value by dispatching to the per-type .key_invalid
 * hook.  Returns an error string describing the problem, after first
 * rejecting out-of-range type codes (which would index past the ops
 * table).
 */
89 const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k)
91 if (k.k->type >= KEY_TYPE_MAX)
92 return "invalid type";
94 return bch2_bkey_ops[k.k->type].key_invalid(c, k);
/*
 * Type-independent validation of the bkey header for a key found in a
 * node of the given btree_node_type.  Returns an error string on
 * failure.  (Some statements, including the success return, are elided
 * from this view — the visible checks are annotated below.)
 */
97 const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
98 enum btree_node_type type)
/* Every key contains at least the fixed-size bkey header. */
100 if (k.k->u64s < BKEY_U64s)
101 return "u64s too small";
/* Btree node pointers have a bounded maximum value size. */
103 if (type == BKEY_TYPE_BTREE &&
104 bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
105 return "value too big";
107 if (btree_node_type_is_extents(type)) {
/* In extent btrees, only deleted keys may have size == 0, and vice versa. */
108 if ((k.k->size == 0) != bkey_deleted(k.k))
109 return "bad size field";
/* An extent's length cannot exceed its end offset. */
111 if (k.k->size > k.k->p.offset)
112 return "size greater than offset";
/* Non-extent btrees: size must be zero (condition elided from view). */
115 return "nonzero size field";
/* Snapshot field check — guarding condition elided from this view. */
119 return "nonzero snapshot";
/* POS_MAX is reserved in leaf btrees; only interior nodes may use it. */
121 if (type != BKEY_TYPE_BTREE &&
122 !bkey_cmp(k.k->p, POS_MAX))
123 return "POS_MAX key";
/*
 * Full key validation: header checks first, then (only if those pass —
 * the GNU ?: elvis operator short-circuits) per-type value checks.
 * Returns the first error string encountered.
 */
128 const char *bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
129 enum btree_node_type type)
131 return __bch2_bkey_invalid(c, k, type) ?:
132 bch2_bkey_val_invalid(c, k);
/*
 * Check that a key's position lies within the [min_key, max_key] range
 * covered by the btree node it was found in.  Returns an error string
 * if out of range (success return elided from this view).
 */
135 const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
137 if (bkey_cmp(k.k->p, b->data->min_key) < 0)
138 return "key before start of btree node";
140 if (bkey_cmp(k.k->p, b->data->max_key) > 0)
141 return "key past end of btree node";
/*
 * Debug-time sanity check of a key in a btree node: run the generic
 * validators, report any failure as a filesystem inconsistency (with
 * the offending key pretty-printed), then invoke the per-type
 * .key_debugcheck hook if one exists.  Several lines (locals, the
 * early-return on error) are elided from this view.
 */
146 void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
148 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
/* Combined header/value validation plus node-range check. */
153 invalid = bch2_bkey_invalid(c, k, btree_node_type(b)) ?:
154 bch2_bkey_in_btree_node(b, k);
158 bch2_bkey_val_to_text(&PBUF(buf), c, k);
159 bch2_fs_inconsistent(c, "invalid bkey %s: %s", buf, invalid);
/* Per-type debug hook is optional. */
163 if (ops->key_debugcheck)
164 ops->key_debugcheck(c, k);
/*
 * Pretty-print a key position: the two sentinel positions are printed
 * symbolically, everything else as inode:offset.
 */
167 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
169 if (!bkey_cmp(pos, POS_MIN))
170 pr_buf(out, "POS_MIN");
171 else if (!bkey_cmp(pos, POS_MAX))
172 pr_buf(out, "POS_MAX");
174 pr_buf(out, "%llu:%llu", pos.inode, pos.offset);
/*
 * Pretty-print the header portion of a bkey: size/type, position,
 * snapshot, length, and version.  The branch structure (including the
 * NULL-key case that prints "(null)") is partially elided from this
 * view.
 */
177 void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
180 pr_buf(out, "u64s %u type %s ", k->u64s,
181 bch2_bkey_types[k->type]);
183 bch2_bpos_to_text(out, k->p);
185 pr_buf(out, " snap %u len %u ver %llu",
186 k->p.snapshot, k->size, k->version.lo);
/* Fallback for a NULL bkey pointer — guarding condition elided. */
188 pr_buf(out, "(null)");
/*
 * Pretty-print a key's value via the per-type .val_to_text hook; key
 * types without a printer produce no output.
 */
192 void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
195 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
197 if (likely(ops->val_to_text))
198 ops->val_to_text(out, c, k);
/*
 * Pretty-print a complete key: header first, then the value (separator
 * between the two elided from this view).
 */
201 void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
204 bch2_bkey_to_text(out, k.k);
208 bch2_val_to_text(out, c, k);
/*
 * Byte-swap a key's value for endianness conversion; presumably
 * dispatches to a per-type swab hook in the ops table (body elided
 * from this view — confirm against the full source).
 */
212 void bch2_bkey_swab_val(struct bkey_s k)
214 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
/*
 * Normalize a key via the per-type .key_normalize hook.  Returns the
 * hook's result; the fallback for types without a hook is elided from
 * this view.
 */
220 bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
222 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
224 return ops->key_normalize
225 ? ops->key_normalize(c, k)
/*
 * Try to merge two adjacent keys (l followed by r).  Refuses outright
 * when merging is disabled, the types or versions differ, or r does
 * not start exactly where l ends; otherwise defers to the per-type
 * .key_merge hook.  On any successful (even partial) merge, the merged
 * left key inherits r's needs_whiteout flag.  Some guard conditions
 * and the final return are elided from this view.
 */
229 enum merge_result bch2_bkey_merge(struct bch_fs *c,
230 struct bkey_s l, struct bkey_s r)
232 const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];
233 enum merge_result ret;
235 if (key_merging_disabled(c) ||
237 l.k->type != r.k->type ||
238 bversion_cmp(l.k->version, r.k->version) ||
/* r must begin exactly at l's end position for the keys to be adjacent. */
239 bkey_cmp(l.k->p, bkey_start_pos(r.k)))
240 return BCH_MERGE_NOMERGE;
242 ret = ops->key_merge(c, l, r);
/* If anything was merged into l, l must whiteout whatever r did. */
244 if (ret != BCH_MERGE_NOMERGE)
245 l.k->needs_whiteout |= r.k->needs_whiteout;
/*
 * Mapping between pre-renumber on-disk key type codes (which were
 * per-btree, starting at 128) and the current global KEY_TYPE_* enum.
 * Used by bch2_bkey_renumber() in both directions: old->new on read,
 * new->old on write.  Struct member declarations are elided from this
 * view.
 */
249 static const struct old_bkey_type {
253 } bkey_renumber_table[] = {
254 {BKEY_TYPE_BTREE, 128, KEY_TYPE_btree_ptr },
255 {BKEY_TYPE_EXTENTS, 128, KEY_TYPE_extent },
256 {BKEY_TYPE_EXTENTS, 129, KEY_TYPE_extent },
257 {BKEY_TYPE_EXTENTS, 130, KEY_TYPE_reservation },
258 {BKEY_TYPE_INODES, 128, KEY_TYPE_inode },
259 {BKEY_TYPE_INODES, 130, KEY_TYPE_inode_generation },
260 {BKEY_TYPE_DIRENTS, 128, KEY_TYPE_dirent },
261 {BKEY_TYPE_DIRENTS, 129, KEY_TYPE_whiteout },
262 {BKEY_TYPE_XATTRS, 128, KEY_TYPE_xattr },
263 {BKEY_TYPE_XATTRS, 129, KEY_TYPE_whiteout },
264 {BKEY_TYPE_ALLOC, 128, KEY_TYPE_alloc },
265 {BKEY_TYPE_QUOTAS, 128, KEY_TYPE_quota },
/*
 * Translate a packed key's type code between the old per-btree
 * numbering and the current global numbering, by linear scan of
 * bkey_renumber_table.  Direction depends on `write`: on write the new
 * code is mapped back to the old one, on read the reverse.  Loop
 * increment and body braces are elided from this view.
 */
268 void bch2_bkey_renumber(enum btree_node_type btree_node_type,
269 struct bkey_packed *k,
272 const struct old_bkey_type *i;
274 for (i = bkey_renumber_table;
275 i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
/* Match on btree type plus whichever code is the lookup side for this direction. */
277 if (btree_node_type == i->btree_node_type &&
278 k->type == (write ? i->new : i->old)) {
279 k->type = write ? i->old : i->new;
/*
 * Apply on-disk format compatibility fixups to a key, in a fixed
 * sequence of up to four steps.  On read the steps run 0..3; on write
 * the index is reversed (3 - i) so the transformations are undone in
 * the opposite order.  Case labels and several closing braces are
 * elided from this view; the visible steps are annotated below.
 */
284 void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
285 unsigned version, unsigned big_endian,
287 struct bkey_format *f,
288 struct bkey_packed *k)
290 const struct bkey_ops *ops;
296 * Do these operations in reverse order in the write path:
/* Step dispatcher: forward on read, mirrored on write. */
299 for (i = 0; i < 4; i++)
300 switch (!write ? i : 3 - i) {
/* Step: byte-swap the packed key when disk and CPU endianness differ. */
302 if (big_endian != CPU_BIG_ENDIAN)
303 bch2_bkey_swab_key(f, k);
/* Step: translate old per-btree type codes (pre-renumber metadata versions). */
306 if (version < bcachefs_metadata_version_bkey_renumber)
307 bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
/*
 * Step: the inode btree changed its key layout (inode and offset
 * fields swapped) in bcachefs_metadata_version_inode_btree_change.
 */
310 if (version < bcachefs_metadata_version_inode_btree_change &&
311 btree_id == BTREE_ID_INODES) {
312 if (!bkey_packed(k)) {
/* Unpacked key: swap the position fields directly. */
313 struct bkey_i *u = packed_to_bkey(k);
314 swap(u->k.p.inode, u->k.p.offset);
315 } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
316 f->bits_per_field[BKEY_FIELD_OFFSET]) {
/*
 * Packed key: build a format with the inode/offset field
 * widths and offsets exchanged, then repack through it.
 */
317 struct bkey_format tmp = *f, *in = f, *out = &tmp;
319 swap(tmp.bits_per_field[BKEY_FIELD_INODE],
320 tmp.bits_per_field[BKEY_FIELD_OFFSET]);
321 swap(tmp.field_offset[BKEY_FIELD_INODE],
322 tmp.field_offset[BKEY_FIELD_OFFSET]);
/* Repacking with swapped field widths must always succeed. */
327 uk = __bch2_bkey_unpack_key(in, k);
328 swap(uk.p.inode, uk.p.offset);
329 BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
/* Step: per-type value fixups — build a bkey_s view of the value. */
334 if (!bkey_packed(k)) {
335 u = bkey_i_to_s(packed_to_bkey(k));
337 uk = __bch2_bkey_unpack_key(f, k);
339 u.v = bkeyp_val(f, k);
/* Byte-swap the value too when endianness differs. */
342 if (big_endian != CPU_BIG_ENDIAN)
343 bch2_bkey_swab_val(u);
/* Per-type compat hook (guard on ops->compat presumably elided — confirm). */
345 ops = &bch2_bkey_ops[k->type];
348 ops->compat(btree_id, version, big_endian, write, u);