/*
 * Assorted bcachefs debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "super.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *bch_debug;

#ifdef CONFIG_BCACHEFS_DEBUG

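/*
 * Verify that the in-memory copy of a btree node matches what's on disk:
 * read the node back through the normal btree read path into c->verify_data,
 * then compare the sorted keys against the live node. On a mismatch, dump the
 * in-memory version, the re-read version and each on-disk bset to the
 * console, then panic.
 */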
void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
	struct btree *v = c->verify_data;
	struct btree_node *n_ondisk, *n_sorted, *n_inmemory;
	struct bset *sorted, *inmemory;
	struct extent_ptr_decoded pick;
	struct bch_dev *ca;
	struct bio *bio;

	if (c->opts.nochanges)
		return;

	btree_node_io_lock(b);
	mutex_lock(&c->verify_lock);

	n_ondisk = c->verify_ondisk;
	n_sorted = c->verify_data->data;
	n_inmemory = b->data;

	bkey_copy(&v->key, &b->key);
	v->written	= 0;
	v->level	= b->level;
	v->btree_id	= b->btree_id;
	bch2_btree_keys_init(v, &c->expensive_debug_checks);

	/* bail out through "out:" so we don't leak verify_lock/the io lock: */
	if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
				       NULL, &pick) <= 0)
		goto out;

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
	if (!bch2_dev_get_ioref(ca, READ))
		goto out;

	bio = bio_alloc_bioset(GFP_NOIO,
			buf_pages(n_sorted, btree_bytes(c)),
			&c->btree_bio);
	bio_set_dev(bio, ca->disk_sb.bdev);
	bio->bi_opf		= REQ_OP_READ|REQ_META;
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_iter.bi_size	= btree_bytes(c);
	bch2_bio_map(bio, n_sorted);

	submit_bio_wait(bio);

	bio_put(bio);
	percpu_ref_put(&ca->io_ref);

	memcpy(n_ondisk, n_sorted, btree_bytes(c));

	if (bch2_btree_node_read_done(c, v, false))
		goto out;

	n_sorted = c->verify_data->data;
	sorted = &n_sorted->keys;
	inmemory = &n_inmemory->keys;

	if (inmemory->u64s != sorted->u64s ||
	    memcmp(inmemory->start,
		   sorted->start,
		   vstruct_end(inmemory) - (void *) inmemory->start)) {
		unsigned offset = 0, sectors;
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		bch2_dump_bset(b, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		bch2_dump_bset(v, sorted, 0);

		while (offset < b->written) {
			if (!offset) {
				i = &n_ondisk->keys;
				sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
					c->block_bits;
			} else {
				struct btree_node_entry *bne =
					(void *) n_ondisk + (offset << 9);
				i = &bne->keys;

				sectors = vstruct_blocks(bne, c->block_bits) <<
					c->block_bits;
			}

			printk(KERN_ERR "*** on disk block %u:\n", offset);
			bch2_dump_bset(b, i, offset);

			offset += sectors;
		}

		printk(KERN_ERR "*** block %u/%u not written\n",
		       offset >> c->block_bits, btree_blocks(c));

		for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
			if (inmemory->_data[j] != sorted->_data[j])
				break;

		printk(KERN_ERR "b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}
out:
	mutex_unlock(&c->verify_lock);
	btree_node_io_unlock(b);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: bch_fs refcounting */

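/*
 * Per-open state for the debugfs files below: the resume position in the
 * btree, the filesystem and btree id being dumped, and a page of formatted
 * output waiting to be copied out to userspace.
 */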
struct dump_iter {
	struct bpos		from;
	struct bch_fs		*c;
	enum btree_id		id;

	char			buf[PAGE_SIZE];
	size_t			bytes;	/* what's currently in buf */

	char __user		*ubuf;	/* destination user buffer */
	size_t			size;	/* size of requested read */
	ssize_t			ret;	/* bytes read so far */
};

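/* Copy as much of buf to the user buffer as its remaining size allows. */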
static int flush_buf(struct dump_iter *i)
{
	if (i->bytes) {
		size_t bytes = min(i->bytes, i->size);

		/*
		 * copy_to_user() returns the number of bytes left uncopied,
		 * not a negative errno - turn a partial copy into -EFAULT
		 * rather than handing that count back to the read() caller.
		 */
		if (copy_to_user(i->ubuf, i->buf, bytes))
			return -EFAULT;

		i->ret	 += bytes;
		i->ubuf	 += bytes;
		i->size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);
	}

	return 0;
}

static int bch2_dump_open(struct inode *inode, struct file *file)
{
	struct btree_debug *bd = inode->i_private;
	struct dump_iter *i;

	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->from = POS_MIN;
	i->c	= container_of(bd, struct bch_fs, btree_debug[bd->id]);
	i->id	= bd->id;

	return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

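/*
 * Dump the keys of one btree, one formatted bkey per line, resuming from
 * i->from on each read() call.
 */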
static ssize_t bch2_read_btree(struct file *file, char __user *buf,
			       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct btree_iter iter;
	struct bkey_s_c k;
	int err;

	i->ubuf = buf;
	i->size = size;
	i->ret	= 0;

	err = flush_buf(i);
	if (err)
		return err;

	if (!i->size)
		return i->ret;

	bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);
	k = bch2_btree_iter_peek(&iter);

	while (k.k && !(err = btree_iter_err(k))) {
		bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k);
		i->bytes = strlen(i->buf);
		BUG_ON(i->bytes >= PAGE_SIZE);
		i->buf[i->bytes] = '\n';
		i->bytes++;

		k = bch2_btree_iter_next(&iter);
		i->from = iter.pos;

		err = flush_buf(i);
		if (err)
			break;

		if (!i->size)
			break;
	}
	bch2_btree_iter_unlock(&iter);

	return err < 0 ? err : i->ret;
}

static const struct file_operations btree_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree,
};

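/*
 * Like the above, but emit per-node information (via
 * bch2_btree_node_to_text()) for each btree node instead of dumping
 * individual keys.
 */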
static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct btree_iter iter;
	struct btree *b;
	int err;

	i->ubuf = buf;
	i->size = size;
	i->ret	= 0;

	err = flush_buf(i);
	if (err)
		return err;

	if (!i->size || !bkey_cmp(POS_MAX, i->from))
		return i->ret;

	for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) {
		bch2_btree_node_to_text(&PBUF(i->buf), i->c, b);
		i->bytes = strlen(i->buf);
		err = flush_buf(i);
		if (err)
			break;

		/*
		 * can't easily correctly restart a btree node traversal across
		 * all nodes, meh
		 */
		i->from = bkey_cmp(POS_MAX, b->key.k.p)
			? bkey_successor(b->key.k.p)
			: b->key.k.p;

		if (!i->size)
			break;
	}
	bch2_btree_iter_unlock(&iter);

	return err < 0 ? err : i->ret;
}

static const struct file_operations btree_format_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree_formats,
};

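/*
 * Walk the keys of a btree and dump bch2_bfloat_to_text() output for each
 * packed key, printing the owning node's summary whenever the iterator
 * crosses into a new node.
 */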
static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct btree *prev_node = NULL;
	int err;

	i->ubuf = buf;
	i->size = size;
	i->ret	= 0;

	err = flush_buf(i);
	if (err)
		return err;

	if (!i->size)
		return i->ret;

	bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);

	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(err = btree_iter_err(k))) {
		struct btree_iter_level *l = &iter.l[0];
		struct bkey_packed *_k =
			bch2_btree_node_iter_peek(&l->iter, l->b);

		if (l->b != prev_node) {
			bch2_btree_node_to_text(&PBUF(i->buf), i->c, l->b);
			i->bytes = strlen(i->buf);
			err = flush_buf(i);
			if (err)
				break;
		}
		prev_node = l->b;

		bch2_bfloat_to_text(&PBUF(i->buf), l->b, _k);
		i->bytes = strlen(i->buf);
		err = flush_buf(i);
		if (err)
			break;

		bch2_btree_iter_next(&iter);
		i->from = iter.pos;

		err = flush_buf(i);
		if (err)
			break;

		if (!i->size)
			break;
	}
	bch2_btree_iter_unlock(&iter);

	return err < 0 ? err : i->ret;
}

static const struct file_operations bfloat_failed_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_bfloat_failed,
};

void bch2_fs_debug_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove_recursive(c->debug);
}

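/*
 * Create this filesystem's debugfs directory (named by its user-visible
 * UUID) with three read-only files per btree: <name>, <name>-formats and
 * <name>-bfloat-failed, backed by the read handlers above.
 */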
void bch2_fs_debug_init(struct bch_fs *c)
{
	struct btree_debug *bd;
	char name[100];

	if (IS_ERR_OR_NULL(bch_debug))
		return;

	snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
	c->debug = debugfs_create_dir(name, bch_debug);
	if (IS_ERR_OR_NULL(c->debug))
		return;

	for (bd = c->btree_debug;
	     bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
	     bd++) {
		bd->id = bd - c->btree_debug;
		bd->btree = debugfs_create_file(bch2_btree_ids[bd->id],
						0400, c->debug, bd,
						&btree_debug_ops);

		snprintf(name, sizeof(name), "%s-formats",
			 bch2_btree_ids[bd->id]);

		bd->btree_format = debugfs_create_file(name, 0400, c->debug, bd,
						       &btree_format_debug_ops);

		snprintf(name, sizeof(name), "%s-bfloat-failed",
			 bch2_btree_ids[bd->id]);

		bd->failed = debugfs_create_file(name, 0400, c->debug, bd,
						 &bfloat_failed_debug_ops);
	}
}

#endif

void bch2_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(bch_debug))
		debugfs_remove_recursive(bch_debug);
}

int __init bch2_debug_init(void)
{
	int ret = 0;

	bch_debug = debugfs_create_dir("bcachefs", NULL);
	return ret;
}