2 * Assorted bcachefs debug code
4 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5 * Copyright 2012 Google, Inc.
9 #include "bkey_methods.h"
10 #include "btree_cache.h"
12 #include "btree_iter.h"
13 #include "btree_update.h"
23 #include <linux/console.h>
24 #include <linux/debugfs.h>
25 #include <linux/module.h>
26 #include <linux/random.h>
27 #include <linux/seq_file.h>
/* Root bcachefs debugfs directory; created in bch2_debug_init(). */
29 static struct dentry *bch_debug;
31 #ifdef CONFIG_BCACHEFS_DEBUG
/*
 * __bch2_btree_verify() - debug check that the in-memory btree node @b
 * matches what is on disk.
 *
 * Re-reads the node's data from disk into c->verify_data (serialized by
 * c->verify_lock and the node's IO lock), runs the normal read path on
 * the copy, then compares the sorted keys against the in-memory bset.
 * On mismatch, dumps the in-memory copy, the read-back copy, and each
 * on-disk bset, then panics at the first differing u64.
 *
 * NOTE(review): this excerpt has lines elided (unbalanced braces,
 * missing early returns and error labels) -- compare against the
 * complete upstream file before relying on control flow here.
 */
33 void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
35 	struct btree *v = c->verify_data;
36 	struct btree_node *n_ondisk, *n_sorted, *n_inmemory;
37 	struct bset *sorted, *inmemory;
38 	struct extent_pick_ptr pick;
	/* Nothing to verify against if we never write anything. */
42 	if (c->opts.nochanges)
	/* Serialize against node IO and other verifiers. */
45 	btree_node_io_lock(b);
46 	mutex_lock(&c->verify_lock);
48 	n_ondisk = c->verify_ondisk;
49 	n_sorted = c->verify_data->data;
	/* Set up the scratch node @v to mirror @b's identity. */
52 	bkey_copy(&v->key, &b->key);
55 	v->btree_id = b->btree_id;
56 	bch2_btree_keys_init(v, &c->expensive_debug_checks);
	/* Pick a replica to read from; bail if none is available. */
58 	if (bch2_btree_pick_ptr(c, b, NULL, &pick) <= 0)
61 	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
62 	if (!bch2_dev_get_ioref(ca, READ))
	/* Synchronously read the raw node back from disk. */
65 	bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
66 	bio_set_dev(bio, ca->disk_sb.bdev);
67 	bio->bi_opf = REQ_OP_READ|REQ_META;
68 	bio->bi_iter.bi_sector = pick.ptr.offset;
69 	bio->bi_iter.bi_size = btree_bytes(c);
70 	bch2_bio_map(bio, n_sorted);
75 	percpu_ref_put(&ca->io_ref);
	/* Keep a pristine on-disk image before read_done() mangles it. */
77 	memcpy(n_ondisk, n_sorted, btree_bytes(c));
79 	if (bch2_btree_node_read_done(c, v, false))
82 	n_sorted = c->verify_data->data;
83 	sorted = &n_sorted->keys;
84 	inmemory = &n_inmemory->keys;
	/* Compare key counts and raw key bytes of the two copies. */
86 	if (inmemory->u64s != sorted->u64s ||
87 	    memcmp(inmemory->start,
89 		   vstruct_end(inmemory) - (void *) inmemory->start)) {
90 		unsigned offset = 0, sectors;
96 		printk(KERN_ERR "*** in memory:\n");
97 		bch2_dump_bset(b, inmemory, 0);
99 		printk(KERN_ERR "*** read back in:\n");
100 		bch2_dump_bset(v, sorted, 0);
		/* Walk each written bset in the pristine on-disk image. */
102 		while (offset < b->written) {
105 			sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
108 				struct btree_node_entry *bne =
109 					(void *) n_ondisk + (offset << 9);
112 				sectors = vstruct_blocks(bne, c->block_bits) <<
116 			printk(KERN_ERR "*** on disk block %u:\n", offset);
117 			bch2_dump_bset(b, i, offset);
122 		printk(KERN_ERR "*** block %u/%u not written\n",
123 		       offset >> c->block_bits, btree_blocks(c));
		/* Find the first differing u64 for the panic message. */
125 		for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
126 			if (inmemory->_data[j] != sorted->_data[j])
129 		printk(KERN_ERR "b->written %u\n", b->written);
132 		panic("verify failed at %u\n", j);
135 	mutex_unlock(&c->verify_lock);
136 	btree_node_io_unlock(b);
141 #ifdef CONFIG_DEBUG_FS
143 /* XXX: bch_fs refcounting */
/*
 * Per-open-file state for the debugfs dump files: a staging buffer plus
 * bookkeeping for a partially-flushed read().
 * NOTE(review): the struct declaration and remaining members (buf, c,
 * id, from) are elided in this excerpt.
 */
151 	size_t		bytes;	/* what's currently in buf */
153 	char __user	*ubuf;	/* destination user buffer */
154 	size_t		size;	/* size of requested read */
155 	ssize_t		ret;	/* bytes read so far */
/*
 * Copy as much of i->buf as fits in the remaining user buffer out to
 * userspace, then shift the unflushed tail to the front of i->buf.
 * NOTE(review): the accounting updates (ubuf/size/ret/bytes) and the
 * copy_to_user() error path are elided in this excerpt.
 */
158 static int flush_buf(struct dump_iter *i)
161 		size_t bytes = min(i->bytes, i->size);
162 		int err = copy_to_user(i->ubuf, i->buf, bytes);
171 		memmove(i->buf, i->buf + bytes, i->bytes);
/*
 * debugfs open: allocate a zeroed dump_iter for this open file and
 * recover the owning bch_fs from the btree_debug slot stashed in
 * inode->i_private (btree_debug lives in an array inside bch_fs, so
 * container_of() on the slot yields the filesystem).
 */
177 static int bch2_dump_open(struct inode *inode, struct file *file)
179 	struct btree_debug *bd = inode->i_private;
182 	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
186 	file->private_data = i;
188 	i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]);
/* debugfs release: free the dump_iter allocated in bch2_dump_open(). */
194 static int bch2_dump_release(struct inode *inode, struct file *file)
196 	kfree(file->private_data);
/*
 * Read op for the per-btree debugfs file: walk keys of btree i->id
 * starting at i->from, render each with bch2_bkey_val_to_text() into
 * i->buf (one key per line), and flush to userspace via the dump_iter
 * buffering. Returns a negative iterator error or bytes copied so far.
 * NOTE(review): the flush_buf() calls and i->from advancement are
 * elided in this excerpt.
 */
200 static ssize_t bch2_read_btree(struct file *file, char __user *buf,
201 			       size_t size, loff_t *ppos)
203 	struct dump_iter *i = file->private_data;
204 	struct btree_iter iter;
219 	bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);
220 	k = bch2_btree_iter_peek(&iter);
222 	while (k.k && !(err = btree_iter_err(k))) {
223 		bch2_bkey_val_to_text(i->c, bkey_type(0, i->id),
224 				      i->buf, sizeof(i->buf), k);
225 		i->bytes = strlen(i->buf);
226 		BUG_ON(i->bytes >= PAGE_SIZE);
		/* Replace the NUL with a newline; bytes counts it below. */
227 		i->buf[i->bytes] = '\n';
230 		k = bch2_btree_iter_next(&iter);
240 	bch2_btree_iter_unlock(&iter);
242 	return err < 0 ? err : i->ret;
/* File ops for the per-btree key dump file ("<btree name>"). */
245 static const struct file_operations btree_debug_ops = {
246 	.owner		= THIS_MODULE,
247 	.open		= bch2_dump_open,
248 	.release	= bch2_dump_release,
249 	.read		= bch2_read_btree,
/*
 * Read op for the "<btree name>-formats" debugfs file: walk btree
 * nodes (not keys) and print each node's format description.
 * Done once i->from has reached POS_MAX.
 */
252 static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
253 				       size_t size, loff_t *ppos)
255 	struct dump_iter *i = file->private_data;
256 	struct btree_iter iter;
268 	if (!i->size || !bkey_cmp(POS_MAX, i->from))
271 	for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) {
272 		i->bytes = bch2_print_btree_node(i->c, b, i->buf,
		/*
		 * can't easily correctly restart a btree node traversal across
		 * reads, so resume from the successor of this node's max key
		 * (or stay at POS_MAX at the end of the btree).
		 */
282 		i->from = bkey_cmp(POS_MAX, b->key.k.p)
283 			? bkey_successor(b->key.k.p)
289 	bch2_btree_iter_unlock(&iter);
291 	return err < 0 ? err : i->ret;
/* File ops for the "<btree name>-formats" node format dump file. */
294 static const struct file_operations btree_format_debug_ops = {
295 	.owner		= THIS_MODULE,
296 	.open		= bch2_dump_open,
297 	.release	= bch2_dump_release,
298 	.read		= bch2_read_btree_formats,
/*
 * Read op for the "<btree name>-bfloat-failed" debugfs file: for each
 * key, print bkey float (bfloat) lookup-table diagnostics via
 * bch2_bkey_print_bfloat(); prints the node header once per node
 * (tracked with prev_node).
 * NOTE(review): flush_buf() calls, i->from advancement and the
 * prev_node update are elided in this excerpt.
 */
301 static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
302 				       size_t size, loff_t *ppos)
304 	struct dump_iter *i = file->private_data;
305 	struct btree_iter iter;
307 	struct btree *prev_node = NULL;
321 	bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);
323 	while ((k = bch2_btree_iter_peek(&iter)).k &&
324 	       !(err = btree_iter_err(k))) {
325 		struct btree_iter_level *l = &iter.l[0];
		/* Need the packed key to inspect its bfloat entry. */
326 		struct bkey_packed *_k =
327 			bch2_btree_node_iter_peek(&l->iter, l->b);
		/* New node: emit its header before the per-key lines. */
329 		if (l->b != prev_node) {
330 			i->bytes = bch2_print_btree_node(i->c, l->b, i->buf,
338 		i->bytes = bch2_bkey_print_bfloat(l->b, _k, i->buf,
345 		bch2_btree_iter_next(&iter);
355 	bch2_btree_iter_unlock(&iter);
357 	return err < 0 ? err : i->ret;
/* File ops for the "<btree name>-bfloat-failed" diagnostics file. */
360 static const struct file_operations bfloat_failed_debug_ops = {
361 	.owner		= THIS_MODULE,
362 	.open		= bch2_dump_open,
363 	.release	= bch2_dump_release,
364 	.read		= bch2_read_bfloat_failed,
/*
 * Tear down this filesystem's debugfs directory (and everything in
 * it). Safe to call if bch2_fs_debug_init() failed or never ran, since
 * IS_ERR_OR_NULL() guards the remove.
 */
367 void bch2_fs_debug_exit(struct bch_fs *c)
369 	if (!IS_ERR_OR_NULL(c->debug))
370 		debugfs_remove_recursive(c->debug);
/*
 * Create this filesystem's debugfs directory (named by its user-facing
 * UUID) under the global bcachefs dir, with three read-only (0400)
 * files per btree: the key dump, "-formats", and "-bfloat-failed".
 * Silently does nothing if debugfs is unavailable; failures are
 * tolerated (debug-only interface).
 */
373 void bch2_fs_debug_init(struct bch_fs *c)
375 	struct btree_debug *bd;
378 	if (IS_ERR_OR_NULL(bch_debug))
381 	snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
382 	c->debug = debugfs_create_dir(name, bch_debug);
383 	if (IS_ERR_OR_NULL(c->debug))
	/* One btree_debug slot per btree id; bd->id doubles as the index. */
386 	for (bd = c->btree_debug;
387 	     bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
389 		bd->id = bd - c->btree_debug;
390 		bd->btree = debugfs_create_file(bch2_btree_ids[bd->id],
394 		snprintf(name, sizeof(name), "%s-formats",
395 			 bch2_btree_ids[bd->id]);
397 		bd->btree_format = debugfs_create_file(name, 0400, c->debug, bd,
398 						       &btree_format_debug_ops);
400 		snprintf(name, sizeof(name), "%s-bfloat-failed",
401 			 bch2_btree_ids[bd->id]);
403 		bd->failed = debugfs_create_file(name, 0400, c->debug, bd,
404 						 &bfloat_failed_debug_ops);
/* Module exit: remove the global bcachefs debugfs directory tree. */
410 void bch2_debug_exit(void)
412 	if (!IS_ERR_OR_NULL(bch_debug))
413 		debugfs_remove_recursive(bch_debug);
416 int __init bch2_debug_init(void)
420 bch_debug = debugfs_create_dir("bcachefs", NULL);