/*
 * Assorted bcachefs debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "super.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *bch_debug;

#ifdef CONFIG_BCACHEFS_DEBUG

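/*
 * Read the given btree node back from disk and compare it against the
 * in-memory copy. On a mismatch, dump the in-memory bset, the re-read bset,
 * and every on-disk bset, then panic. Only built with CONFIG_BCACHEFS_DEBUG,
 * and skipped entirely on nochanges mounts.
 */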
void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
        struct btree *v = c->verify_data;
        struct btree_node *n_ondisk, *n_sorted, *n_inmemory;
        struct bset *sorted, *inmemory;
        struct extent_pick_ptr pick;
        struct bch_dev *ca;
        struct bio *bio;

        if (c->opts.nochanges)
                return;

        btree_node_io_lock(b);
        mutex_lock(&c->verify_lock);

        n_ondisk = c->verify_ondisk;
        n_sorted = c->verify_data->data;
        n_inmemory = b->data;

        bkey_copy(&v->key, &b->key);
        v->written      = 0;
        v->level        = b->level;
        v->btree_id     = b->btree_id;
        bch2_btree_keys_init(v, &c->expensive_debug_checks);

        if (bch2_btree_pick_ptr(c, b, NULL, &pick) <= 0)
                goto out;

        ca = bch_dev_bkey_exists(c, pick.ptr.dev);
        if (!bch2_dev_get_ioref(ca, READ))
                goto out;

        bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
        bio_set_dev(bio, ca->disk_sb.bdev);
        bio->bi_opf             = REQ_OP_READ|REQ_META;
        bio->bi_iter.bi_sector  = pick.ptr.offset;
        bio->bi_iter.bi_size    = btree_bytes(c);
        bch2_bio_map(bio, n_sorted);

        submit_bio_wait(bio);

        bio_put(bio);
        percpu_ref_put(&ca->io_ref);

        memcpy(n_ondisk, n_sorted, btree_bytes(c));

        if (bch2_btree_node_read_done(c, v, false))
                goto out;

        n_sorted = c->verify_data->data;
        sorted = &n_sorted->keys;
        inmemory = &n_inmemory->keys;

        if (inmemory->u64s != sorted->u64s ||
            memcmp(inmemory->start,
                   sorted->start,
                   vstruct_end(inmemory) - (void *) inmemory->start)) {
                unsigned offset = 0, sectors;
                struct bset *i;
                unsigned j;

                console_lock();

                printk(KERN_ERR "*** in memory:\n");
                bch2_dump_bset(b, inmemory, 0);

                printk(KERN_ERR "*** read back in:\n");
                bch2_dump_bset(v, sorted, 0);

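                /*
                 * Walk the on-disk image bset by bset: the first bset lives
                 * in the btree node header, subsequent ones in
                 * btree_node_entries at increasing sector offsets.
                 */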
                while (offset < b->written) {
                        if (!offset) {
                                i = &n_ondisk->keys;
                                sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
                                        c->block_bits;
                        } else {
                                struct btree_node_entry *bne =
                                        (void *) n_ondisk + (offset << 9);
                                i = &bne->keys;

                                sectors = vstruct_blocks(bne, c->block_bits) <<
                                        c->block_bits;
                        }

                        printk(KERN_ERR "*** on disk block %u:\n", offset);
                        bch2_dump_bset(b, i, offset);

                        offset += sectors;
                }

                printk(KERN_ERR "*** block %u/%u not written\n",
                       offset >> c->block_bits, btree_blocks(c));

                for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
                        if (inmemory->_data[j] != sorted->_data[j])
                                break;

                printk(KERN_ERR "b->written %u\n", b->written);

                console_unlock();
                panic("verify failed at %u\n", j);
        }
out:
        mutex_unlock(&c->verify_lock);
        btree_node_io_unlock(b);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: bch_fs refcounting */

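/*
 * State for one open debugfs dump file: output is formatted into buf and
 * drained into the user's buffer across successive read() calls, with the
 * btree walk resuming at 'from'.
 */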
struct dump_iter {
        struct bpos             from;
        struct bch_fs   *c;
        enum btree_id           id;

        char                    buf[PAGE_SIZE];
        size_t                  bytes;  /* what's currently in buf */

        char __user             *ubuf;  /* destination user buffer */
        size_t                  size;   /* size of requested read */
        ssize_t                 ret;    /* bytes read so far */
};

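/*
 * Copy as much buffered output to userspace as the remaining read size
 * allows, shifting any leftover bytes to the front of buf for the next call.
 */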
static int flush_buf(struct dump_iter *i)
{
        if (i->bytes) {
                size_t bytes = min(i->bytes, i->size);

                /* copy_to_user() returns the number of bytes not copied: */
                if (copy_to_user(i->ubuf, i->buf, bytes))
                        return -EFAULT;

                i->ret   += bytes;
                i->ubuf  += bytes;
                i->size  -= bytes;
                i->bytes -= bytes;
                memmove(i->buf, i->buf + bytes, i->bytes);
        }

        return 0;
}

static int bch2_dump_open(struct inode *inode, struct file *file)
{
        struct btree_debug *bd = inode->i_private;
        struct dump_iter *i;

        i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
        if (!i)
                return -ENOMEM;

        file->private_data = i;
        i->from = POS_MIN;
        i->c    = container_of(bd, struct bch_fs, btree_debug[bd->id]);
        i->id   = bd->id;

        return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

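/*
 * read() for the per-btree debugfs file: walk keys from i->from, printing
 * one key per line, until the user buffer is full or the btree is exhausted.
 */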
static ssize_t bch2_read_btree(struct file *file, char __user *buf,
                               size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct btree_iter iter;
        struct bkey_s_c k;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        err = flush_buf(i);
        if (err)
                return err;

        if (!i->size)
                return i->ret;

        bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);
        k = bch2_btree_iter_peek(&iter);

        while (k.k && !(err = btree_iter_err(k))) {
                bch2_bkey_val_to_text(i->c, bkey_type(0, i->id),
                                      i->buf, sizeof(i->buf), k);
                i->bytes = strlen(i->buf);
                BUG_ON(i->bytes >= PAGE_SIZE);
                i->buf[i->bytes] = '\n';
                i->bytes++;

                k = bch2_btree_iter_next(&iter);
                i->from = iter.pos;

                err = flush_buf(i);
                if (err)
                        break;

                if (!i->size)
                        break;
        }
        bch2_btree_iter_unlock(&iter);

        return err < 0 ? err : i->ret;
}

static const struct file_operations btree_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_btree,
};

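/*
 * read() for the -formats debugfs file: walk btree nodes instead of keys,
 * printing a summary of each node via bch2_print_btree_node().
 */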
static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
                                       size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct btree_iter iter;
        struct btree *b;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        err = flush_buf(i);
        if (err)
                return err;

        if (!i->size || !bkey_cmp(POS_MAX, i->from))
                return i->ret;

        for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) {
                i->bytes = bch2_print_btree_node(i->c, b, i->buf,
                                                 sizeof(i->buf));
                err = flush_buf(i);
                if (err)
                        break;

                /*
                 * can't easily correctly restart a btree node traversal across
                 * all nodes, meh
                 */
                i->from = bkey_cmp(POS_MAX, b->key.k.p)
                        ? bkey_successor(b->key.k.p)
                        : b->key.k.p;

                if (!i->size)
                        break;
        }
        bch2_btree_iter_unlock(&iter);

        return err < 0 ? err : i->ret;
}

static const struct file_operations btree_format_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_btree_formats,
};

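/*
 * read() for the -bfloat-failed debugfs file: for each key, print bkey_float
 * information for the packed key at the iterator position (via
 * bch2_bkey_print_bfloat()), preceded by a node summary whenever the walk
 * crosses into a new node.
 */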
static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
                                       size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct btree *prev_node = NULL;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        err = flush_buf(i);
        if (err)
                return err;

        if (!i->size)
                return i->ret;

        bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);

        while ((k = bch2_btree_iter_peek(&iter)).k &&
               !(err = btree_iter_err(k))) {
                struct btree_iter_level *l = &iter.l[0];
                struct bkey_packed *_k =
                        bch2_btree_node_iter_peek(&l->iter, l->b);

                if (l->b != prev_node) {
                        i->bytes = bch2_print_btree_node(i->c, l->b, i->buf,
                                                         sizeof(i->buf));
                        err = flush_buf(i);
                        if (err)
                                break;
                }
                prev_node = l->b;

                i->bytes = bch2_bkey_print_bfloat(l->b, _k, i->buf,
                                                  sizeof(i->buf));

                err = flush_buf(i);
                if (err)
                        break;

                bch2_btree_iter_next(&iter);
                i->from = iter.pos;

                err = flush_buf(i);
                if (err)
                        break;

                if (!i->size)
                        break;
        }
        bch2_btree_iter_unlock(&iter);

        return err < 0 ? err : i->ret;
}

static const struct file_operations bfloat_failed_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_bfloat_failed,
};

void bch2_fs_debug_exit(struct bch_fs *c)
{
        if (!IS_ERR_OR_NULL(c->debug))
                debugfs_remove_recursive(c->debug);
}

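/*
 * Create the per-filesystem debugfs directory, named after the filesystem's
 * user-visible UUID, containing three files per btree: a key dump, a
 * -formats node dump, and a -bfloat-failed dump.
 */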
void bch2_fs_debug_init(struct bch_fs *c)
{
        struct btree_debug *bd;
        char name[100];

        if (IS_ERR_OR_NULL(bch_debug))
                return;

        snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
        c->debug = debugfs_create_dir(name, bch_debug);
        if (IS_ERR_OR_NULL(c->debug))
                return;

        for (bd = c->btree_debug;
             bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
             bd++) {
                bd->id = bd - c->btree_debug;
                bd->btree = debugfs_create_file(bch2_btree_ids[bd->id],
                                                0400, c->debug, bd,
                                                &btree_debug_ops);

                snprintf(name, sizeof(name), "%s-formats",
                         bch2_btree_ids[bd->id]);

                bd->btree_format = debugfs_create_file(name, 0400, c->debug, bd,
                                                       &btree_format_debug_ops);

                snprintf(name, sizeof(name), "%s-bfloat-failed",
                         bch2_btree_ids[bd->id]);

                bd->failed = debugfs_create_file(name, 0400, c->debug, bd,
                                                 &bfloat_failed_debug_ops);
        }
}

#endif

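/* Module-wide debugfs root; per-filesystem directories are created under it. */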
void bch2_debug_exit(void)
{
        if (!IS_ERR_OR_NULL(bch_debug))
                debugfs_remove_recursive(bch_debug);
}

int __init bch2_debug_init(void)
{
        int ret = 0;

        bch_debug = debugfs_create_dir("bcachefs", NULL);
        return ret;
}