1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
5 #include "btree_types.h"
6 #include "alloc_background.h"
/*
 * Human-readable names for each bkey type, indexed by its KEY_TYPE_* nr.
 * NOTE(review): table body is elided here -- the x() macro is presumably
 * expanded over a BCH_BKEY_TYPES() list; confirm against the full file.
 */
16 const char * const bch2_bkey_types[] = {
17 #define x(name, nr) #name,
/*
 * Validator for deleted/discard keys.  Returns an error string on invalid
 * input, NULL if valid (body elided in this view -- presumably accepts
 * anything; confirm against the full file).
 */
23 static const char *deleted_key_invalid(const struct bch_fs *c,
/* ops for KEY_TYPE_deleted: validation only, no value to print or swab */
29 #define bch2_bkey_ops_deleted (struct bkey_ops) { \
30 .key_invalid = deleted_key_invalid, \
/* ops for KEY_TYPE_discard: shares the deleted-key validator */
33 #define bch2_bkey_ops_discard (struct bkey_ops) { \
34 .key_invalid = deleted_key_invalid, \
/*
 * Validator for key types that must carry no value payload: any nonzero
 * value size is an error.  Returns an error string, or NULL if valid
 * (trailing return elided in this view).
 */
37 static const char *empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k)
39 if (bkey_val_bytes(k.k))
40 return "value size should be zero";
/* ops for KEY_TYPE_error: error markers carry no value */
45 #define bch2_bkey_ops_error (struct bkey_ops) { \
46 .key_invalid = empty_val_key_invalid, \
/*
 * Validator for KEY_TYPE_cookie: the value must be exactly a
 * struct bch_cookie.  Returns an error string, or NULL if valid
 * (trailing return elided in this view).
 */
49 static const char *key_type_cookie_invalid(const struct bch_fs *c,
52 if (bkey_val_bytes(k.k) != sizeof(struct bch_cookie))
53 return "incorrect value size";
58 #define bch2_bkey_ops_cookie (struct bkey_ops) { \
59 .key_invalid = key_type_cookie_invalid, \
/* ops for KEY_TYPE_whiteout: whiteouts carry no value */
62 #define bch2_bkey_ops_whiteout (struct bkey_ops) { \
63 .key_invalid = empty_val_key_invalid, \
/*
 * Validator for KEY_TYPE_inline_data (body elided in this view).
 */
66 static const char *key_type_inline_data_invalid(const struct bch_fs *c,
/*
 * Print an inline_data value: its length plus a hex dump of at most the
 * first 32 bytes (%*phN is the kernel's fixed-length hex-string format).
 */
72 static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
75 struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
76 unsigned datalen = bkey_inline_data_bytes(k.k);
78 pr_buf(out, "datalen %u: %*phN",
79 datalen, min(datalen, 32U), d.v->data);
82 #define bch2_bkey_ops_inline_data (struct bkey_ops) { \
83 .key_invalid = key_type_inline_data_invalid, \
84 .val_to_text = key_type_inline_data_to_text, \
/*
 * Per-key-type dispatch table, indexed by KEY_TYPE_* nr; each entry is one
 * of the bch2_bkey_ops_* initializers defined above.  (Macro expansion list
 * elided in this view.)
 */
87 static const struct bkey_ops bch2_bkey_ops[] = {
88 #define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
/*
 * Validate a key's value via its type's key_invalid hook.  Rejects unknown
 * type numbers first so the ops-table index is always in range.  Returns an
 * error string, or NULL if valid.
 */
93 const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k)
95 if (k.k->type >= KEY_TYPE_MAX)
96 return "invalid type";
98 return bch2_bkey_ops[k.k->type].key_invalid(c, k);
/*
 * Type-independent key validation: checks the key header fields against the
 * constraints of the btree node type it lives in.  Returns an error string,
 * or NULL if valid (trailing return elided in this view).
 */
101 const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
102 enum btree_node_type type)
/* a key must be at least the size of a bare struct bkey */
104 if (k.k->u64s < BKEY_U64s)
105 return "u64s too small";
/* interior btree node pointers have a bounded value size */
107 if (type == BKEY_TYPE_BTREE &&
108 bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
109 return "value too big";
111 if (btree_node_type_is_extents(type)) {
/* in extent btrees, only deleted keys may have size 0, and vice versa */
112 if ((k.k->size == 0) != bkey_deleted(k.k))
113 return "bad size field";
/* an extent's size cannot extend below offset 0 */
115 if (k.k->size > k.k->p.offset)
116 return "size greater than offset";
/* NOTE(review): the else branch for non-extent btrees is elided here */
119 return "nonzero size field";
123 return "nonzero snapshot";
/* POS_MAX is reserved (btree node max_key sentinel) in leaf btrees */
125 if (type != BKEY_TYPE_BTREE &&
126 !bkey_cmp(k.k->p, POS_MAX))
127 return "POS_MAX key";
/*
 * Full key validation: generic header checks first, then the per-type value
 * check.  Returns the first error string found, or NULL if the key is valid.
 */
132 const char *bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
133 enum btree_node_type type)
135 return __bch2_bkey_invalid(c, k, type) ?:
136 bch2_bkey_val_invalid(c, k);
/*
 * Check that a key's position lies within [min_key, max_key] of the btree
 * node that contains it.  Returns an error string, or NULL if in range
 * (trailing return elided in this view).
 */
139 const char *bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k)
141 if (bkey_cmp(k.k->p, b->data->min_key) < 0)
142 return "key before start of btree node";
144 if (bkey_cmp(k.k->p, b->data->max_key) > 0)
145 return "key past end of btree node";
/*
 * Debug assertion for a key in a btree node: run the full validators and,
 * on failure, print the offending key and flag the filesystem inconsistent.
 * Finally invokes the type's optional key_debugcheck hook.
 */
150 void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
152 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
157 invalid = bch2_bkey_invalid(c, k, btree_node_type(b)) ?:
158 bch2_bkey_in_btree_node(b, k);
/* render the bad key into a stack buffer before reporting */
162 bch2_bkey_val_to_text(&PBUF(buf), c, k);
163 bch2_fs_inconsistent(c, "invalid bkey %s: %s", buf, invalid);
167 if (ops->key_debugcheck)
168 ops->key_debugcheck(c, k);
/*
 * Print a btree position: the POS_MIN/POS_MAX sentinels by name, otherwise
 * "inode:offset" (the final pr_buf is presumably the else branch; braces
 * elided in this view).
 */
171 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
173 if (!bkey_cmp(pos, POS_MIN))
174 pr_buf(out, "POS_MIN");
175 else if (!bkey_cmp(pos, POS_MAX))
176 pr_buf(out, "POS_MAX");
178 pr_buf(out, "%llu:%llu", pos.inode, pos.offset);
/*
 * Print a key header: size/type, position, then snapshot/length/version.
 * The trailing "(null)" is presumably emitted when k is NULL (the guard
 * branch is elided in this view -- confirm against the full file).
 */
181 void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
184 pr_buf(out, "u64s %u type %s ", k->u64s,
185 bch2_bkey_types[k->type]);
187 bch2_bpos_to_text(out, k->p);
189 pr_buf(out, " snap %u len %u ver %llu",
190 k->p.snapshot, k->size, k->version.lo);
192 pr_buf(out, "(null)");
/*
 * Print a key's value via its type's val_to_text hook, if the type
 * provides one; types without a printer produce no output.
 */
196 void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
199 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
201 if (likely(ops->val_to_text))
202 ops->val_to_text(out, c, k);
/*
 * Print a complete key: header fields followed by the type-specific value
 * (separator/guard lines elided in this view).
 */
205 void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
208 bch2_bkey_to_text(out, k.k);
212 bch2_val_to_text(out, c, k);
/*
 * Byte-swap a key's value via its type's swab hook (hook call elided in
 * this view -- presumably guarded on ops->swab being non-NULL).
 */
216 void bch2_bkey_swab_val(struct bkey_s k)
218 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
/*
 * Normalize a key via its type's key_normalize hook; types without one are
 * left untouched (the false-arm of the ternary is elided in this view,
 * presumably returning false, i.e. "key not dropped").
 */
224 bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
226 const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
228 return ops->key_normalize
229 ? ops->key_normalize(c, k)
/*
 * Try to merge two adjacent keys (l followed by r) into one.
 *
 * Refuses with BCH_MERGE_NOMERGE when merging is disabled, the types or
 * versions differ, or l does not end exactly where r begins.  On any
 * (partial or full) merge, r's needs_whiteout flag is propagated to l.
 * NOTE(review): one condition line is elided here -- presumably a check
 * that ops->key_merge exists; confirm against the full file.
 */
233 enum merge_result bch2_bkey_merge(struct bch_fs *c,
234 struct bkey_s l, struct bkey_s r)
236 const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];
237 enum merge_result ret;
239 if (key_merging_disabled(c) ||
241 l.k->type != r.k->type ||
242 bversion_cmp(l.k->version, r.k->version) ||
243 bkey_cmp(l.k->p, bkey_start_pos(r.k)))
244 return BCH_MERGE_NOMERGE;
246 ret = ops->key_merge(c, l, r);
248 if (ret != BCH_MERGE_NOMERGE)
249 l.k->needs_whiteout |= r.k->needs_whiteout;
/*
 * Mapping between pre-renumber on-disk type codes (128+, scoped per btree)
 * and the current global KEY_TYPE_* numbering; consumed by
 * bch2_bkey_renumber() in both directions.
 */
253 static const struct old_bkey_type {
257 } bkey_renumber_table[] = {
258 {BKEY_TYPE_BTREE, 128, KEY_TYPE_btree_ptr },
259 {BKEY_TYPE_EXTENTS, 128, KEY_TYPE_extent },
260 {BKEY_TYPE_EXTENTS, 129, KEY_TYPE_extent },
261 {BKEY_TYPE_EXTENTS, 130, KEY_TYPE_reservation },
262 {BKEY_TYPE_INODES, 128, KEY_TYPE_inode },
263 {BKEY_TYPE_INODES, 130, KEY_TYPE_inode_generation },
264 {BKEY_TYPE_DIRENTS, 128, KEY_TYPE_dirent },
265 {BKEY_TYPE_DIRENTS, 129, KEY_TYPE_whiteout },
266 {BKEY_TYPE_XATTRS, 128, KEY_TYPE_xattr },
267 {BKEY_TYPE_XATTRS, 129, KEY_TYPE_whiteout },
268 {BKEY_TYPE_ALLOC, 128, KEY_TYPE_alloc },
269 {BKEY_TYPE_QUOTAS, 128, KEY_TYPE_quota },
/*
 * Translate a key's type number between the old per-btree codes and the
 * current global numbering, using bkey_renumber_table.  Direction depends
 * on write: reading maps old -> new, writing maps new -> old (the write
 * parameter's declaration line is elided in this view).
 */
272 void bch2_bkey_renumber(enum btree_node_type btree_node_type,
273 struct bkey_packed *k,
276 const struct old_bkey_type *i;
278 for (i = bkey_renumber_table;
279 i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
281 if (btree_node_type == i->btree_node_type &&
282 k->type == (write ? i->new : i->old)) {
283 k->type = write ? i->old : i->new;
/*
 * Convert a (possibly packed) key between its on-disk format and the
 * current in-memory format, covering endianness and older metadata
 * versions.  Four compat steps run forward on read and in reverse order
 * on write (the "write" parameter's declaration is elided in this view):
 *   0: byte-swap the packed key if on-disk endianness differs from host
 *   1: renumber key types for pre-bkey_renumber versions
 *   2: swap the inode/offset position fields in the INODES btree for
 *      pre-inode_btree_change versions
 *   3: byte-swap the value and run the type's compat hook
 */
288 void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
289 unsigned version, unsigned big_endian,
291 struct bkey_format *f,
292 struct bkey_packed *k)
294 const struct bkey_ops *ops;
300 * Do these operations in reverse order in the write path:
/* !write ? i : 3 - i reverses the step order when writing */
303 for (i = 0; i < 4; i++)
304 switch (!write ? i : 3 - i) {
306 if (big_endian != CPU_BIG_ENDIAN)
307 bch2_bkey_swab_key(f, k);
310 if (version < bcachefs_metadata_version_bkey_renumber)
311 bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
314 if (version < bcachefs_metadata_version_inode_btree_change &&
315 btree_id == BTREE_ID_INODES) {
316 if (!bkey_packed(k)) {
/* unpacked: swap the position fields directly */
317 struct bkey_i *u = packed_to_bkey(k);
318 swap(u->k.p.inode, u->k.p.offset);
319 } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
320 f->bits_per_field[BKEY_FIELD_OFFSET]) {
/*
 * packed with both fields present: build a format with the two
 * fields exchanged, then unpack/swap/repack through it
 */
321 struct bkey_format tmp = *f, *in = f, *out = &tmp;
323 swap(tmp.bits_per_field[BKEY_FIELD_INODE],
324 tmp.bits_per_field[BKEY_FIELD_OFFSET]);
325 swap(tmp.field_offset[BKEY_FIELD_INODE],
326 tmp.field_offset[BKEY_FIELD_OFFSET]);
/* repack must succeed: tmp has the same field widths as f */
331 uk = __bch2_bkey_unpack_key(in, k);
332 swap(uk.p.inode, uk.p.offset);
333 BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
/* step 3: obtain an unpacked view of the value for swab/compat */
338 if (!bkey_packed(k)) {
339 u = bkey_i_to_s(packed_to_bkey(k));
341 uk = __bch2_bkey_unpack_key(f, k);
343 u.v = bkeyp_val(f, k);
346 if (big_endian != CPU_BIG_ENDIAN)
347 bch2_bkey_swab_val(u);
349 ops = &bch2_bkey_ops[k->type];
/* type-specific version compat hook (NULL-check elided in this view) */
352 ops->compat(btree_id, version, big_endian, write, u);