1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
5 #include "btree_types.h"
6 #include "alloc_background.h"
/*
 * Human-readable name for each bkey type, built with an x-macro
 * (the macro-list expansion and closing brace are elided from this view).
 * Indexed by k->type in bch2_bkey_to_text() below.
 */
16 const char * const bch2_bkey_types[] = {
17 #define x(name, nr) #name,
/*
 * .key_invalid hook shared by deleted/discard keys (remaining parameters
 * and body are elided from this view).
 */
23 static const char *deleted_key_invalid(const struct bch_fs *c,
/* Both whiteout variants validate identically. */
29 #define bch2_bkey_ops_deleted (struct bkey_ops) { \
30 .key_invalid = deleted_key_invalid, \
33 #define bch2_bkey_ops_discard (struct bkey_ops) { \
34 .key_invalid = deleted_key_invalid, \
/*
 * Validator for key types that must carry no value: any nonzero value
 * size is an error.  (The success return is elided from this view.)
 */
37 static const char *empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k)
39 if (bkey_val_bytes(k.k))
40 return "value size should be zero";
/* KEY_TYPE_error carries no value. */
45 #define bch2_bkey_ops_error (struct bkey_ops) { \
46 .key_invalid = empty_val_key_invalid, \
/*
 * KEY_TYPE_cookie must carry exactly a struct bch_cookie as its value
 * (remaining parameters and the success return are elided from this view).
 */
49 static const char *key_type_cookie_invalid(const struct bch_fs *c,
52 if (bkey_val_bytes(k.k) != sizeof(struct bch_cookie))
53 return "incorrect value size";
58 #define bch2_bkey_ops_cookie (struct bkey_ops) { \
59 .key_invalid = key_type_cookie_invalid, \
/* Hash whiteouts carry no value; reuse the empty-value validator. */
62 #define bch2_bkey_ops_hash_whiteout (struct bkey_ops) { \
63 .key_invalid = empty_val_key_invalid, \
/* Validator for KEY_TYPE_inline_data (body elided from this view). */
66 static const char *key_type_inline_data_invalid(const struct bch_fs *c,
/*
 * Print an inline_data value as "datalen N: <hex>", dumping at most the
 * first 32 bytes of the payload (%*phN is the kernel hex-byte-string
 * format specifier).
 */
72 static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
75 struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
76 unsigned datalen = bkey_inline_data_bytes(k.k);
78 pr_buf(out, "datalen %u: %*phN",
79 datalen, min(datalen, 32U), d.v->data);
82 #define bch2_bkey_ops_inline_data (struct bkey_ops) { \
83 .key_invalid = key_type_inline_data_invalid, \
84 .val_to_text = key_type_inline_data_to_text, \
/*
 * Per-type operations table indexed by KEY_TYPE_*, built from the same
 * x-macro list as bch2_bkey_types (expansion and closing brace elided
 * from this view).
 */
87 static const struct bkey_ops bch2_bkey_ops[] = {
88 #define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
/*
 * Validate a key's value against its type-specific rules.  Returns an
 * error string on failure; otherwise returns whatever the per-type
 * key_invalid hook returns.
 */
93 const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k)
95 if (k.k->type >= KEY_TYPE_MAX)
96 return "invalid type";
/* Dispatch to the validator registered in bch2_bkey_ops for this type. */
98 return bch2_bkey_ops[k.k->type].key_invalid(c, k);
/*
 * Type-independent key checks: size, position and snapshot invariants
 * that must hold for any key in a node of the given btree_node_type.
 * Returns an error string on failure; several guard lines and the
 * success return are elided from this view.
 */
101 const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
102 enum btree_node_type type)
104 if (k.k->u64s < BKEY_U64s)
105 return "u64s too small";
/* Btree node pointers have a bounded value size. */
107 if (type == BKEY_TYPE_btree &&
108 bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
109 return "value too big";
111 if (btree_node_type_is_extents(type)) {
/* In extent btrees, only deleted keys may have size 0. */
112 if ((k.k->size == 0) != bkey_deleted(k.k))
113 return "bad size field";
/* An extent must not extend below offset 0. */
115 if (k.k->size > k.k->p.offset)
116 return "size greater than offset";
/* NOTE(review): the non-extent guard for this return is elided. */
119 return "nonzero size field";
122 if (type != BKEY_TYPE_btree &&
123 !btree_type_has_snapshots(type) &&
/* NOTE(review): the snapshot-field operand of this condition is elided. */
125 return "nonzero snapshot";
/* Snapshot btrees: leaf keys are expected to carry snapshot U32_MAX here. */
127 if (type != BKEY_TYPE_btree &&
128 btree_type_has_snapshots(type) &&
129 k.k->p.snapshot != U32_MAX)
130 return "invalid snapshot field";
/* POS_MAX is reserved; ordinary (non-btree-node) keys may not use it. */
132 if (type != BKEY_TYPE_btree &&
133 !bkey_cmp(k.k->p, POS_MAX))
134 return "POS_MAX key";
/*
 * Full key validity check: generic invariants first, then the per-type
 * value check.  Uses the GNU ?: extension — returns the first non-NULL
 * error string, else the result of bch2_bkey_val_invalid().
 */
139 const char *bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
140 enum btree_node_type type)
142 return __bch2_bkey_invalid(c, k, type) ?:
143 bch2_bkey_val_invalid(c, k);
/*
 * Check that a key's position lies within the [min_key, max_key] range
 * of the btree node containing it.  Returns an error string when out of
 * range (the success return is elided from this view).
 */
146 const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
148 if (bpos_cmp(k.k->p, b->data->min_key) < 0)
149 return "key before start of btree node";
151 if (bpos_cmp(k.k->p, b->data->max_key) > 0)
152 return "key past end of btree node";
/*
 * Debug check: validate a key both intrinsically and against its node's
 * range; on failure, render it and flag the filesystem inconsistent.
 * Local declarations and the valid-key early exit are elided from this
 * view.
 */
157 void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
163 invalid = bch2_bkey_invalid(c, k, btree_node_type(b)) ?:
164 bch2_bkey_in_btree_node(b, k);
/* Render the offending key into buf before reporting. */
168 bch2_bkey_val_to_text(&PBUF(buf), c, k);
169 bch2_fs_inconsistent(c, "invalid bkey %s: %s", buf, invalid);
/*
 * Print a bpos, substituting symbolic names (POS_MIN/POS_MAX, U64_MAX,
 * U32_MAX) for sentinel field values and numeric values otherwise.
 * NOTE(review): the else arms pairing each sentinel check with its
 * numeric fallback, and the field separators, are elided from this view.
 */
173 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
175 if (!bpos_cmp(pos, POS_MIN))
176 pr_buf(out, "POS_MIN");
177 else if (!bpos_cmp(pos, POS_MAX))
178 pr_buf(out, "POS_MAX");
180 if (pos.inode == U64_MAX)
181 pr_buf(out, "U64_MAX");
183 pr_buf(out, "%llu", pos.inode);
185 if (pos.offset == U64_MAX)
186 pr_buf(out, "U64_MAX");
188 pr_buf(out, "%llu", pos.offset);
190 if (pos.snapshot == U32_MAX)
191 pr_buf(out, "U32_MAX");
193 pr_buf(out, "%u", pos.snapshot);
/*
 * Print a bkey header: u64s count, type (symbolic when known, numeric
 * otherwise), position, length and version.  The guarding branches for
 * the numeric-type and "(null)" fallbacks are elided from this view.
 */
197 void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
200 pr_buf(out, "u64s %u type ", k->u64s);
202 if (k->type < KEY_TYPE_MAX)
203 pr_buf(out, "%s ", bch2_bkey_types[k->type]);
/* Unknown type: fall back to the raw numeric value. */
205 pr_buf(out, "%u ", k->type);
207 bch2_bpos_to_text(out, k->p);
209 pr_buf(out, " len %u ver %llu", k->size, k->version.lo);
/* NOTE(review): printed for a NULL key; its guarding branch is elided. */
211 pr_buf(out, "(null)");
/*
 * Print a key's value via its type's val_to_text hook when one exists;
 * out-of-range types are reported as "(invalid type N)".  Remaining
 * parameters and the else branch are elided from this view.
 */
215 void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
218 if (k.k->type < KEY_TYPE_MAX) {
219 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
221 if (likely(ops->val_to_text))
222 ops->val_to_text(out, c, k);
224 pr_buf(out, "(invalid type %u)", k.k->type);
/*
 * Print the key header followed by its value (any separator output
 * between the two calls is elided from this view).
 */
228 void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
231 bch2_bkey_to_text(out, k.k);
235 bch2_val_to_text(out, c, k);
/*
 * Byte-swap a key's value using its type's ops entry (the hook check and
 * invocation are elided from this view).
 */
239 void bch2_bkey_swab_val(struct bkey_s k)
241 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
/*
 * Normalize a key via its type's key_normalize hook when present (the
 * no-hook operand of the ternary is elided from this view).
 */
247 bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
249 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
251 return ops->key_normalize
252 ? ops->key_normalize(c, k)
/*
 * Attempt to merge adjacent keys l and r.  Refuses (BCH_MERGE_NOMERGE)
 * when merging is disabled, types or versions differ, or l does not end
 * exactly where r begins; otherwise delegates to the type's key_merge
 * hook and, on success, propagates r's needs_whiteout into l.
 * NOTE(review): at least one condition line and the final return are
 * elided from this view.
 */
256 enum merge_result bch2_bkey_merge(struct bch_fs *c,
257 struct bkey_s l, struct bkey_s r)
259 const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];
260 enum merge_result ret;
262 if (bch2_key_merging_disabled ||
264 l.k->type != r.k->type ||
265 bversion_cmp(l.k->version, r.k->version) ||
266 bpos_cmp(l.k->p, bkey_start_pos(r.k)))
267 return BCH_MERGE_NOMERGE;
269 ret = ops->key_merge(c, l, r);
/* The merged key must whiteout whatever either input would have. */
271 if (ret != BCH_MERGE_NOMERGE)
272 l.k->needs_whiteout |= r.k->needs_whiteout;
/*
 * Mapping between pre-renumbering on-disk key type codes (per btree
 * type) and current KEY_TYPE_* values, used by bch2_bkey_renumber() for
 * metadata older than the bkey_renumber format version.  Struct member
 * declarations and the closing brace are elided from this view.
 */
276 static const struct old_bkey_type {
280 } bkey_renumber_table[] = {
281 {BKEY_TYPE_btree, 128, KEY_TYPE_btree_ptr },
282 {BKEY_TYPE_extents, 128, KEY_TYPE_extent },
283 {BKEY_TYPE_extents, 129, KEY_TYPE_extent },
284 {BKEY_TYPE_extents, 130, KEY_TYPE_reservation },
285 {BKEY_TYPE_inodes, 128, KEY_TYPE_inode },
286 {BKEY_TYPE_inodes, 130, KEY_TYPE_inode_generation },
287 {BKEY_TYPE_dirents, 128, KEY_TYPE_dirent },
288 {BKEY_TYPE_dirents, 129, KEY_TYPE_hash_whiteout },
289 {BKEY_TYPE_xattrs, 128, KEY_TYPE_xattr },
290 {BKEY_TYPE_xattrs, 129, KEY_TYPE_hash_whiteout },
291 {BKEY_TYPE_alloc, 128, KEY_TYPE_alloc },
292 {BKEY_TYPE_quotas, 128, KEY_TYPE_quota },
/*
 * Translate a packed key's type code between old and new numbering via
 * bkey_renumber_table: reading (write false) maps old -> new, writing
 * maps new -> old.  The write parameter, loop increment and closing
 * braces are elided from this view.
 */
295 void bch2_bkey_renumber(enum btree_node_type btree_node_type,
296 struct bkey_packed *k,
299 const struct old_bkey_type *i;
301 for (i = bkey_renumber_table;
302 i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
304 if (btree_node_type == i->btree_node_type &&
305 k->type == (write ? i->new : i->old)) {
306 k->type = write ? i->old : i->new;
/*
 * Convert a key between an older on-disk format version and the current
 * in-memory format.  Runs a fixed sequence of nr_compat steps — applied
 * in reverse order on the write path so writing exactly undoes reading.
 * NOTE(review): this function continues past the end of the visible
 * chunk; case labels, several branches and closing braces are elided.
 */
311 void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
312 unsigned version, unsigned big_endian,
314 struct bkey_format *f,
315 struct bkey_packed *k)
317 const struct bkey_ops *ops;
320 unsigned nr_compat = 5;
324 * Do these operations in reverse order in the write path:
327 for (i = 0; i < nr_compat; i++)
328 switch (!write ? i : nr_compat - 1 - i) {
/* Step: swap key bytes when on-disk endianness differs from the CPU's. */
330 if (big_endian != CPU_BIG_ENDIAN)
331 bch2_bkey_swab_key(f, k);
/* Step: remap old type codes for pre-renumber metadata versions. */
334 if (version < bcachefs_metadata_version_bkey_renumber)
335 bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
/* Step: old inode btree used swapped inode/offset position fields. */
338 if (version < bcachefs_metadata_version_inode_btree_change &&
339 btree_id == BTREE_ID_inodes) {
340 if (!bkey_packed(k)) {
341 struct bkey_i *u = packed_to_bkey(k);
342 swap(u->k.p.inode, u->k.p.offset);
343 } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
344 f->bits_per_field[BKEY_FIELD_OFFSET]) {
/* Packed case: swap the two fields in a copy of the format as well. */
345 struct bkey_format tmp = *f, *in = f, *out = &tmp;
347 swap(tmp.bits_per_field[BKEY_FIELD_INODE],
348 tmp.bits_per_field[BKEY_FIELD_OFFSET]);
349 swap(tmp.field_offset[BKEY_FIELD_INODE],
350 tmp.field_offset[BKEY_FIELD_OFFSET]);
/* Unpack with the source format, swap, repack with the other. */
355 uk = __bch2_bkey_unpack_key(in, k);
356 swap(uk.p.inode, uk.p.offset);
357 BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
/* Step: synthesize/strip the snapshot field for pre-snapshot versions. */
362 if (version < bcachefs_metadata_version_snapshot &&
363 (level || btree_type_has_snapshots(btree_id))) {
364 struct bkey_i *u = packed_to_bkey(k);
367 u->k.p.snapshot = write
/* Packed case: clamp to what the format can represent. */
370 u64 min_packed = f->field_offset[BKEY_FIELD_SNAPSHOT];
371 u64 max_packed = min_packed +
372 ~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
374 uk = __bch2_bkey_unpack_key(f, k);
375 uk.p.snapshot = write
376 ? min_packed : min_t(u64, U32_MAX, max_packed);
378 BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
/* Step: build a bkey_s view of the value for per-type compat/swab. */
384 if (!bkey_packed(k)) {
385 u = bkey_i_to_s(packed_to_bkey(k));
387 uk = __bch2_bkey_unpack_key(f, k);
389 u.v = bkeyp_val(f, k);
392 if (big_endian != CPU_BIG_ENDIAN)
393 bch2_bkey_swab_val(u);
/* Delegate any remaining conversion to the type's compat hook. */
395 ops = &bch2_bkey_ops[k->type];
398 ops->compat(btree_id, version, big_endian, write, u);