// SPDX-License-Identifier: GPL-2.0
/*
 * Assorted bcachefs debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "super.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *bch_debug;

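/*
 * Read one on-disk replica of a btree node back in and compare it against the
 * in-memory copy; returns true if the replica did not verify.
 */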
static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
                                      struct extent_ptr_decoded pick)
{
        struct btree *v = c->verify_data;
        struct btree_node *n_ondisk = c->verify_ondisk;
        struct btree_node *n_sorted = c->verify_data->data;
        struct bset *sorted, *inmemory = &b->data->keys;
        struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
        struct bio *bio;
        bool failed = false;

        if (!bch2_dev_get_ioref(ca, READ))
                return false;

        bio = bio_alloc_bioset(GFP_NOIO,
                        buf_pages(n_sorted, btree_bytes(c)),
                        &c->btree_bio);
        bio_set_dev(bio, ca->disk_sb.bdev);
        bio->bi_opf             = REQ_OP_READ|REQ_META;
        bio->bi_iter.bi_sector  = pick.ptr.offset;
        bch2_bio_map(bio, n_sorted, btree_bytes(c));

        submit_bio_wait(bio);

        bio_put(bio);
        percpu_ref_put(&ca->io_ref);

        memcpy(n_ondisk, n_sorted, btree_bytes(c));

        v->written = 0;
        if (bch2_btree_node_read_done(c, ca, v, false))
                return false;

        n_sorted = c->verify_data->data;
        sorted = &n_sorted->keys;

        if (inmemory->u64s != sorted->u64s ||
            memcmp(inmemory->start,
                   sorted->start,
                   vstruct_end(inmemory) - (void *) inmemory->start)) {
                unsigned offset = 0, sectors;
                struct bset *i;
                unsigned j;

                console_lock();

                printk(KERN_ERR "*** in memory:\n");
                bch2_dump_bset(c, b, inmemory, 0);

                printk(KERN_ERR "*** read back in:\n");
                bch2_dump_bset(c, v, sorted, 0);

                while (offset < v->written) {
                        if (!offset) {
                                i = &n_ondisk->keys;
                                sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
                                        c->block_bits;
                        } else {
                                struct btree_node_entry *bne =
                                        (void *) n_ondisk + (offset << 9);
                                i = &bne->keys;

                                sectors = vstruct_blocks(bne, c->block_bits) <<
                                        c->block_bits;
                        }

                        printk(KERN_ERR "*** on disk block %u:\n", offset);
                        bch2_dump_bset(c, b, i, offset);

                        offset += sectors;
                }

                for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
                        if (inmemory->_data[j] != sorted->_data[j])
                                break;

                console_unlock();
                bch_err(c, "verify failed at key %u", j);

                failed = true;
        }

        if (v->written != b->written) {
                bch_err(c, "written wrong: expected %u, got %u",
                        b->written, v->written);
                failed = true;
        }

        return failed;
}

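/*
 * Verify a btree node: read back every replica pointed to by the node's key
 * and check that each one matches what we have in memory.
 */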
void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
        struct bkey_ptrs_c ptrs;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;
        struct btree *v;
        struct bset *inmemory = &b->data->keys;
        struct bkey_packed *k;
        bool failed = false;

        if (c->opts.nochanges)
                return;

        bch2_btree_node_io_lock(b);
        mutex_lock(&c->verify_lock);

        if (!c->verify_ondisk) {
                c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
                if (!c->verify_ondisk)
                        goto out;
        }

        if (!c->verify_data) {
                c->verify_data = __bch2_btree_node_mem_alloc(c);
                if (!c->verify_data)
                        goto out;

                list_del_init(&c->verify_data->list);
        }

        BUG_ON(b->nsets != 1);

        for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_next(k))
                if (k->type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k);
                        v->mem_ptr = 0;
                }

        v = c->verify_data;
        bkey_copy(&v->key, &b->key);
        v->c.level      = b->c.level;
        v->c.btree_id   = b->c.btree_id;
        bch2_btree_keys_init(v);

        ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
        bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry)
                failed |= bch2_btree_verify_replica(c, b, p);

        if (failed) {
                struct printbuf buf = PRINTBUF;

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
                bch2_fs_fatal_error(c, "btree node verify failed for: %s\n", buf.buf);
                printbuf_exit(&buf);
        }
out:
        mutex_unlock(&c->verify_lock);
        bch2_btree_node_io_unlock(b);
}

#ifdef CONFIG_DEBUG_FS

/* XXX: bch_fs refcounting */

struct dump_iter {
        struct bch_fs           *c;
        enum btree_id           id;
        struct bpos             from;
        u64                     iter;

        struct printbuf         buf;

        char __user             *ubuf;  /* destination user buffer */
        size_t                  size;   /* size of requested read */
        ssize_t                 ret;    /* bytes read so far */
};

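/* Flush as much of the accumulated printbuf output to userspace as will fit: */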
static int flush_buf(struct dump_iter *i)
{
        if (i->buf.pos) {
                size_t bytes = min_t(size_t, i->buf.pos, i->size);

                /* copy_to_user() returns the number of bytes left uncopied */
                if (copy_to_user(i->ubuf, i->buf.buf, bytes))
                        return -EFAULT;

                i->ret   += bytes;
                i->ubuf  += bytes;
                i->size  -= bytes;
                i->buf.pos -= bytes;
                memmove(i->buf.buf, i->buf.buf + bytes, i->buf.pos);
        }

        return 0;
}

static int bch2_dump_open(struct inode *inode, struct file *file)
{
        struct btree_debug *bd = inode->i_private;
        struct dump_iter *i;

        i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
        if (!i)
                return -ENOMEM;

        file->private_data = i;
        i->from = POS_MIN;
        i->iter = 0;
        i->c    = container_of(bd, struct bch_fs, btree_debug[bd->id]);
        i->id   = bd->id;
        i->buf  = PRINTBUF;

        return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
        struct dump_iter *i = file->private_data;

        printbuf_exit(&i->buf);
        kfree(i);
        return 0;
}

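/* debugfs read for the per-btree dump files: one key per line */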
static ssize_t bch2_read_btree(struct file *file, char __user *buf,
                               size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        err = flush_buf(i);
        if (err)
                return err;

        if (!i->size)
                return i->ret;

        bch2_trans_init(&trans, i->c, 0, 0);

        bch2_trans_iter_init(&trans, &iter, i->id, i->from,
                             BTREE_ITER_PREFETCH|
                             BTREE_ITER_ALL_SNAPSHOTS);
        k = bch2_btree_iter_peek(&iter);

        while (k.k && !(err = bkey_err(k))) {
                bch2_bkey_val_to_text(&i->buf, i->c, k);
                pr_char(&i->buf, '\n');

                k = bch2_btree_iter_next(&iter);
                i->from = iter.pos;

                err = flush_buf(i);
                if (err)
                        break;

                if (!i->size)
                        break;
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);

        return err < 0 ? err : i->ret;
}

static const struct file_operations btree_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_btree,
};

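/* debugfs read for the <btree>-formats files: dump a description of each btree node */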
static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
                                       size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct btree_trans trans;
        struct btree_iter iter;
        struct btree *b;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        err = flush_buf(i);
        if (err)
                return err;

        if (!i->size || !bpos_cmp(SPOS_MAX, i->from))
                return i->ret;

        bch2_trans_init(&trans, i->c, 0, 0);

        for_each_btree_node(&trans, iter, i->id, i->from, 0, b, err) {
                bch2_btree_node_to_text(&i->buf, i->c, b);
                err = flush_buf(i);
                if (err)
                        break;

                /*
                 * can't easily correctly restart a btree node traversal across
                 * all nodes, meh
                 */
                i->from = bpos_cmp(SPOS_MAX, b->key.k.p)
                        ? bpos_successor(b->key.k.p)
                        : b->key.k.p;

                if (!i->size)
                        break;
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);

        return err < 0 ? err : i->ret;
}

static const struct file_operations btree_format_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_btree_formats,
};

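/*
 * debugfs read for the <btree>-bfloat-failed files: print each key's bkey_float
 * (auxiliary search tree) information, plus the node it lives in when that changes.
 */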
static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
                                       size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct btree *prev_node = NULL;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        err = flush_buf(i);
        if (err)
                return err;

        if (!i->size)
                return i->ret;

        bch2_trans_init(&trans, i->c, 0, 0);

        bch2_trans_iter_init(&trans, &iter, i->id, i->from,
                             BTREE_ITER_PREFETCH|
                             BTREE_ITER_ALL_SNAPSHOTS);

        while ((k = bch2_btree_iter_peek(&iter)).k &&
               !(err = bkey_err(k))) {
                struct btree_path_level *l = &iter.path->l[0];
                struct bkey_packed *_k =
                        bch2_btree_node_iter_peek(&l->iter, l->b);

                if (l->b != prev_node) {
                        bch2_btree_node_to_text(&i->buf, i->c, l->b);
                        err = flush_buf(i);
                        if (err)
                                break;
                }
                prev_node = l->b;

                bch2_bfloat_to_text(&i->buf, l->b, _k);
                err = flush_buf(i);
                if (err)
                        break;

                bch2_btree_iter_advance(&iter);
                i->from = iter.pos;

                err = flush_buf(i);
                if (err)
                        break;

                if (!i->size)
                        break;
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);

        return err < 0 ? err : i->ret;
}

static const struct file_operations bfloat_failed_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_bfloat_failed,
};

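/* Print a summary of one cached btree node: its key, flags, write state and journal pins: */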
static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
                                           struct btree *b)
{
        out->tabstops[0] = 32;

        pr_buf(out, "%px btree=%s l=%u ",
               b,
               bch2_btree_ids[b->c.btree_id],
               b->c.level);
        pr_newline(out);

        pr_indent_push(out, 2);

        bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
        pr_newline(out);

        pr_buf(out, "flags: ");
        pr_tab(out);
        bch2_flags_to_text(out, bch2_btree_node_flags, b->flags);
        pr_newline(out);

        pr_buf(out, "pcpu read locks: ");
        pr_tab(out);
        pr_buf(out, "%u", b->c.lock.readers != NULL);
        pr_newline(out);

        pr_buf(out, "written:");
        pr_tab(out);
        pr_buf(out, "%u", b->written);
        pr_newline(out);

        pr_buf(out, "writes blocked:");
        pr_tab(out);
        pr_buf(out, "%u", !list_empty_careful(&b->write_blocked));
        pr_newline(out);

        pr_buf(out, "will make reachable:");
        pr_tab(out);
        pr_buf(out, "%lx", b->will_make_reachable);
        pr_newline(out);

        pr_buf(out, "journal pin %px:", &b->writes[0].journal);
        pr_tab(out);
        pr_buf(out, "%llu", b->writes[0].journal.seq);
        pr_newline(out);

        pr_buf(out, "journal pin %px:", &b->writes[1].journal);
        pr_tab(out);
        pr_buf(out, "%llu", b->writes[1].journal.seq);
        pr_newline(out);

        pr_indent_pop(out, 2);
}

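/*
 * debugfs read for cached_btree_nodes: walk the btree node cache hash table
 * under RCU, dumping a summary of every node currently in memory.
 */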
static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
                                            size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct bch_fs *c = i->c;
        bool done = false;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        do {
                struct bucket_table *tbl;
                struct rhash_head *pos;
                struct btree *b;

                err = flush_buf(i);
                if (err)
                        return err;

                if (!i->size)
                        break;

                rcu_read_lock();
                i->buf.atomic++;
                tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
                                          &c->btree_cache.table);
                if (i->iter < tbl->size) {
                        rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
                                bch2_cached_btree_node_to_text(&i->buf, c, b);
                        i->iter++;
                } else {
                        done = true;
                }
                --i->buf.atomic;
                rcu_read_unlock();
        } while (!done);

        if (i->buf.allocation_failure)
                return -ENOMEM;

        return i->ret;
}

static const struct file_operations cached_btree_nodes_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_cached_btree_nodes_read,
};

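/* debugfs read for journal_pins: dump what is pinning each journal sequence number, one seq at a time */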
static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
                                      size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct bch_fs *c = i->c;
        bool done = false;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        do {
                err = flush_buf(i);
                if (err)
                        return err;

                if (!i->size)
                        break;

                done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
                i->iter++;
        } while (!done);

        if (i->buf.allocation_failure)
                return -ENOMEM;

        return i->ret;
}

static const struct file_operations journal_pins_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_journal_pins_read,
};

void bch2_fs_debug_exit(struct bch_fs *c)
{
        if (!IS_ERR_OR_NULL(c->fs_debug_dir))
                debugfs_remove_recursive(c->fs_debug_dir);
}

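/*
 * Create this filesystem's debugfs hierarchy: a directory named after the
 * filesystem UUID containing cached_btree_nodes, journal_pins, and per-btree
 * dump files.
 */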
void bch2_fs_debug_init(struct bch_fs *c)
{
        struct btree_debug *bd;
        char name[100];

        if (IS_ERR_OR_NULL(bch_debug))
                return;

        snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
        c->fs_debug_dir = debugfs_create_dir(name, bch_debug);
        if (IS_ERR_OR_NULL(c->fs_debug_dir))
                return;

        debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
                            c->btree_debug, &cached_btree_nodes_ops);

        debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
                            c->btree_debug, &journal_pins_ops);

        c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
        if (IS_ERR_OR_NULL(c->btree_debug_dir))
                return;

        for (bd = c->btree_debug;
             bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
             bd++) {
                bd->id = bd - c->btree_debug;
                debugfs_create_file(bch2_btree_ids[bd->id],
                                    0400, c->btree_debug_dir, bd,
                                    &btree_debug_ops);

                snprintf(name, sizeof(name), "%s-formats",
                         bch2_btree_ids[bd->id]);

                debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
                                    &btree_format_debug_ops);

                snprintf(name, sizeof(name), "%s-bfloat-failed",
                         bch2_btree_ids[bd->id]);

                debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
                                    &bfloat_failed_debug_ops);
        }
}

#endif

void bch2_debug_exit(void)
{
        if (!IS_ERR_OR_NULL(bch_debug))
                debugfs_remove_recursive(bch_debug);
}

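/* Module init: create the top-level "bcachefs" debugfs directory */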
int __init bch2_debug_init(void)
{
        int ret = 0;

        bch_debug = debugfs_create_dir("bcachefs", NULL);
        return ret;
}