// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_types.h"
#include "alloc_background.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "quota.h"
#include "reflink.h"
#include "xattr.h"

const char * const bch2_bkey_types[] = {
#define x(name, nr) #name,
	BCH_BKEY_TYPES()
#undef x
	NULL
};

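/*
 * A sketch of what the x-macro above expands to, assuming BCH_BKEY_TYPES()
 * emits entries of the form x(deleted, 0) x(error, 1) ...:
 *
 *	const char * const bch2_bkey_types[] = {
 *		"deleted",
 *		"error",
 *		...
 *		NULL
 *	};
 */
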
static const char *deleted_key_invalid(const struct bch_fs *c,
				       struct bkey_s_c k)
{
	return NULL;
}

#define bch2_bkey_ops_deleted (struct bkey_ops) {	\
	.key_invalid = deleted_key_invalid,		\
}

#define bch2_bkey_ops_discard (struct bkey_ops) {	\
	.key_invalid = deleted_key_invalid,		\
}

static const char *empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_bytes(k.k))
		return "value size should be zero";

	return NULL;
}

#define bch2_bkey_ops_error (struct bkey_ops) {		\
	.key_invalid = empty_val_key_invalid,		\
}

static const char *key_type_cookie_invalid(const struct bch_fs *c,
					   struct bkey_s_c k)
{
	if (bkey_val_bytes(k.k) != sizeof(struct bch_cookie))
		return "incorrect value size";

	return NULL;
}

#define bch2_bkey_ops_cookie (struct bkey_ops) {	\
	.key_invalid = key_type_cookie_invalid,		\
}

#define bch2_bkey_ops_hash_whiteout (struct bkey_ops) {	\
	.key_invalid = empty_val_key_invalid,		\
}

static const char *key_type_inline_data_invalid(const struct bch_fs *c,
						struct bkey_s_c k)
{
	return NULL;
}

static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
					 struct bkey_s_c k)
{
	struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
	unsigned datalen = bkey_inline_data_bytes(k.k);

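	/*
	 * %*phN is the printk extension for hex-dumping a buffer with no
	 * separators; the width argument caps output at min(datalen, 32)
	 * bytes of value data.
	 */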
	pr_buf(out, "datalen %u: %*phN",
	       datalen, min(datalen, 32U), d.v->data);
}

#define bch2_bkey_ops_inline_data (struct bkey_ops) {	\
	.key_invalid	= key_type_inline_data_invalid,	\
	.val_to_text	= key_type_inline_data_to_text,	\
}

const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
	BCH_BKEY_TYPES()
#undef x
};

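/*
 * All type-dependent behaviour is dispatched through the table above; e.g.
 * validating a key's value reduces to
 *
 *	bch2_bkey_ops[k.k->type].key_invalid(c, k);
 *
 * as bch2_bkey_val_invalid() below does.
 */
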
const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k)
{
	if (k.k->type >= KEY_TYPE_MAX)
		return "invalid type";

	return bch2_bkey_ops[k.k->type].key_invalid(c, k);
}

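/*
 * Per-btree bitmask of permitted key types: bit n is set iff the KEY_TYPE
 * with value n may appear in that btree. KEY_TYPE_deleted is implicitly
 * permitted everywhere (see __bch2_bkey_invalid()).
 */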
static unsigned bch2_key_types_allowed[] = {
	[BKEY_TYPE_extents] =
		(1U << KEY_TYPE_error)|
		(1U << KEY_TYPE_cookie)|
		(1U << KEY_TYPE_extent)|
		(1U << KEY_TYPE_reservation)|
		(1U << KEY_TYPE_reflink_p)|
		(1U << KEY_TYPE_inline_data),
	[BKEY_TYPE_inodes] =
		(1U << KEY_TYPE_inode)|
		(1U << KEY_TYPE_inode_generation),
	[BKEY_TYPE_dirents] =
		(1U << KEY_TYPE_hash_whiteout)|
		(1U << KEY_TYPE_dirent),
	[BKEY_TYPE_xattrs] =
		(1U << KEY_TYPE_cookie)|
		(1U << KEY_TYPE_hash_whiteout)|
		(1U << KEY_TYPE_xattr),
	[BKEY_TYPE_alloc] =
		(1U << KEY_TYPE_alloc)|
		(1U << KEY_TYPE_alloc_v2),
	[BKEY_TYPE_quotas] =
		(1U << KEY_TYPE_quota),
	[BKEY_TYPE_stripes] =
		(1U << KEY_TYPE_stripe),
	[BKEY_TYPE_reflink] =
		(1U << KEY_TYPE_reflink_v)|
		(1U << KEY_TYPE_indirect_inline_data),
	[BKEY_TYPE_btree] =
		(1U << KEY_TYPE_btree_ptr)|
		(1U << KEY_TYPE_btree_ptr_v2),
};

const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
				enum btree_node_type type)
{
	unsigned key_types_allowed = (1U << KEY_TYPE_deleted)|
		bch2_key_types_allowed[type];

	if (k.k->u64s < BKEY_U64s)
		return "u64s too small";

	if (!(key_types_allowed & (1U << k.k->type)))
		return "invalid key type for this btree";

	if (type == BKEY_TYPE_btree &&
	    bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	if (btree_node_type_is_extents(type)) {
		if ((k.k->size == 0) != bkey_deleted(k.k))
			return "bad size field";

		if (k.k->size > k.k->p.offset)
			return "size greater than offset";
	} else {
		if (k.k->size)
			return "nonzero size field";
	}

	if (type != BKEY_TYPE_btree &&
	    !btree_type_has_snapshots(type) &&
	    k.k->p.snapshot)
		return "nonzero snapshot";

	if (type != BKEY_TYPE_btree &&
	    btree_type_has_snapshots(type) &&
	    k.k->p.snapshot != U32_MAX)
		return "invalid snapshot field";

	if (type != BKEY_TYPE_btree &&
	    !bkey_cmp(k.k->p, POS_MAX))
		return "POS_MAX key";

	return NULL;
}

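/*
 * The GNU "a ?: b" expression below evaluates to the first non-NULL error
 * string, so the structural checks run before the per-type value checks.
 */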
const char *bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
			      enum btree_node_type type)
{
	return __bch2_bkey_invalid(c, k, type) ?:
		bch2_bkey_val_invalid(c, k);
}

const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
{
	if (bpos_cmp(k.k->p, b->data->min_key) < 0)
		return "key before start of btree node";

	if (bpos_cmp(k.k->p, b->data->max_key) > 0)
		return "key past end of btree node";

	return NULL;
}

void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
{
	const char *invalid;

	BUG_ON(!k.k->u64s);

	invalid = bch2_bkey_invalid(c, k, btree_node_type(b)) ?:
		bch2_bkey_in_btree_node(b, k);
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, k);
		bch2_fs_inconsistent(c, "invalid bkey %s: %s", buf, invalid);
	}
}

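/*
 * Positions print as inode:offset:snapshot, e.g. "4096:0:U32_MAX"
 * (illustrative); POS_MIN and POS_MAX are special-cased by name.
 */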
void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
{
	if (!bpos_cmp(pos, POS_MIN))
		pr_buf(out, "POS_MIN");
	else if (!bpos_cmp(pos, POS_MAX))
		pr_buf(out, "POS_MAX");
	else {
		if (pos.inode == U64_MAX)
			pr_buf(out, "U64_MAX");
		else
			pr_buf(out, "%llu", pos.inode);
		pr_buf(out, ":");

		if (pos.offset == U64_MAX)
			pr_buf(out, "U64_MAX");
		else
			pr_buf(out, "%llu", pos.offset);
		pr_buf(out, ":");

		if (pos.snapshot == U32_MAX)
			pr_buf(out, "U32_MAX");
		else
			pr_buf(out, "%u", pos.snapshot);
	}
}

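/*
 * Illustrative output: "u64s 5 type extent 4096:128:U32_MAX len 8 ver 0";
 * a NULL key prints as "(null)".
 */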
void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
{
	if (k) {
		pr_buf(out, "u64s %u type ", k->u64s);

		if (k->type < KEY_TYPE_MAX)
			pr_buf(out, "%s ", bch2_bkey_types[k->type]);
		else
			pr_buf(out, "%u ", k->type);

		bch2_bpos_to_text(out, k->p);

		pr_buf(out, " len %u ver %llu", k->size, k->version.lo);
	} else {
		pr_buf(out, "(null)");
	}
}

void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
		      struct bkey_s_c k)
{
	if (k.k->type < KEY_TYPE_MAX) {
		const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];

		if (likely(ops->val_to_text))
			ops->val_to_text(out, c, k);
	} else {
		pr_buf(out, "(invalid type %u)", k.k->type);
	}
}

void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	bch2_bkey_to_text(out, k.k);

	if (bkey_val_bytes(k.k)) {
		pr_buf(out, ": ");
		bch2_val_to_text(out, c, k);
	}
}

void bch2_bkey_swab_val(struct bkey_s k)
{
	const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];

	if (ops->swab)
		ops->swab(k);
}

bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
{
	const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];

	return ops->key_normalize
		? ops->key_normalize(c, k)
		: false;
}

bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];

	return bch2_bkey_maybe_mergable(l.k, r.k) && ops->key_merge(c, l, r);
}

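/*
 * Assuming bch2_bkey_maybe_mergable() only returns true when the two keys
 * are candidates of the same type that actually have a key_merge op, the
 * unconditional ops->key_merge() call above is safe.
 */
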
static const struct old_bkey_type {
	u8		btree_node_type;
	u8		old;
	u8		new;
} bkey_renumber_table[] = {
	{BKEY_TYPE_btree,	128, KEY_TYPE_btree_ptr		},
	{BKEY_TYPE_extents,	128, KEY_TYPE_extent		},
	{BKEY_TYPE_extents,	129, KEY_TYPE_extent		},
	{BKEY_TYPE_extents,	130, KEY_TYPE_reservation	},
	{BKEY_TYPE_inodes,	128, KEY_TYPE_inode		},
	{BKEY_TYPE_inodes,	130, KEY_TYPE_inode_generation	},
	{BKEY_TYPE_dirents,	128, KEY_TYPE_dirent		},
	{BKEY_TYPE_dirents,	129, KEY_TYPE_hash_whiteout	},
	{BKEY_TYPE_xattrs,	128, KEY_TYPE_xattr		},
	{BKEY_TYPE_xattrs,	129, KEY_TYPE_hash_whiteout	},
	{BKEY_TYPE_alloc,	128, KEY_TYPE_alloc		},
	{BKEY_TYPE_quotas,	128, KEY_TYPE_quota		},
};

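/*
 * Before bcachefs_metadata_version_bkey_renumber, on-disk key types were
 * numbered per btree starting at 128; the table above translates between
 * those old per-btree numbers and the current global KEY_TYPE_* values.
 */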
void bch2_bkey_renumber(enum btree_node_type btree_node_type,
			struct bkey_packed *k,
			int write)
{
	const struct old_bkey_type *i;

	for (i = bkey_renumber_table;
	     i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
	     i++)
		if (btree_node_type == i->btree_node_type &&
		    k->type == (write ? i->new : i->old)) {
			k->type = write ? i->old : i->new;
			break;
		}
}

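/*
 * __bch2_bkey_compat() applies five transforms: 0) byteswap the key,
 * 1) renumber old key types, 2) swap inode/offset in the inodes btree,
 * 3) fix up the snapshot field, 4) byteswap the value and call the
 * per-type compat hook. Reads run 0..4, writes run 4..0 to invert them.
 */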
void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
			unsigned version, unsigned big_endian,
			int write,
			struct bkey_format *f,
			struct bkey_packed *k)
{
	const struct bkey_ops *ops;
	struct bkey uk;
	struct bkey_s u;
	unsigned nr_compat = 5;
	int i;

	/*
	 * Do these operations in reverse order in the write path:
	 */

	for (i = 0; i < nr_compat; i++)
	switch (!write ? i : nr_compat - 1 - i) {
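	/*
	 * Illustrative: with nr_compat == 5, the read path visits cases
	 * 0,1,2,3,4 and the write path 4,3,2,1,0, so writing inverts
	 * reading transform by transform.
	 */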
	case 0:
		if (big_endian != CPU_BIG_ENDIAN)
			bch2_bkey_swab_key(f, k);
		break;
	case 1:
		if (version < bcachefs_metadata_version_bkey_renumber)
			bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
		break;
	case 2:
		if (version < bcachefs_metadata_version_inode_btree_change &&
		    btree_id == BTREE_ID_inodes) {
			if (!bkey_packed(k)) {
				struct bkey_i *u = packed_to_bkey(k);
				swap(u->k.p.inode, u->k.p.offset);
			} else if (f->bits_per_field[BKEY_FIELD_INODE] &&
				   f->bits_per_field[BKEY_FIELD_OFFSET]) {
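				/*
				 * The inode and offset fields trade places:
				 * unpack through one format, repack through a
				 * copy with the two slots swapped (direction
				 * decides which is source and which is
				 * destination).
				 */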
				struct bkey_format tmp = *f, *in = f, *out = &tmp;

				swap(tmp.bits_per_field[BKEY_FIELD_INODE],
				     tmp.bits_per_field[BKEY_FIELD_OFFSET]);
				swap(tmp.field_offset[BKEY_FIELD_INODE],
				     tmp.field_offset[BKEY_FIELD_OFFSET]);

				if (!write)
					swap(in, out);

				uk = __bch2_bkey_unpack_key(in, k);
				swap(uk.p.inode, uk.p.offset);
				BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
			}
		}
		break;
	case 3:
		if (version < bcachefs_metadata_version_snapshot &&
		    (level || btree_type_has_snapshots(btree_id))) {
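			/*
			 * Pre-snapshot metadata has no snapshot field: on
			 * read, materialize it as U32_MAX (or the largest
			 * value the format can pack); on write, squash it
			 * back down for older versions.
			 */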
			struct bkey_i *u = packed_to_bkey(k);

			if (u) {
				u->k.p.snapshot = write
					? 0 : U32_MAX;
			} else {
				u64 min_packed = f->field_offset[BKEY_FIELD_SNAPSHOT];
				u64 max_packed = min_packed +
					~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);

				uk = __bch2_bkey_unpack_key(f, k);
				uk.p.snapshot = write
					? min_packed : min_t(u64, U32_MAX, max_packed);

				BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
			}
		}
		break;
	case 4:
		if (!bkey_packed(k)) {
			u = bkey_i_to_s(packed_to_bkey(k));
		} else {
			uk = __bch2_bkey_unpack_key(f, k);
			u.k = &uk;
			u.v = bkeyp_val(f, k);
		}

		if (big_endian != CPU_BIG_ENDIAN)
			bch2_bkey_swab_val(u);

		ops = &bch2_bkey_ops[k->type];

		if (ops->compat)
			ops->compat(btree_id, version, big_endian, write, u);
		break;
	default:
		BUG();
	}
}