/*
 * Assorted bcachefs debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "super.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *bch_debug;

#ifdef CONFIG_BCACHEFS_DEBUG

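/*
 * Verify that the in-memory copy of a btree node matches what is on disk:
 * re-read the node into c->verify_data, run it through the normal read path,
 * and compare the resulting sorted keys against the in-memory keys, dumping
 * both copies and panicking on a mismatch.
 */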
void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
	struct btree *v = c->verify_data;
	struct btree_node *n_ondisk, *n_sorted, *n_inmemory;
	struct bset *sorted, *inmemory;
	struct extent_ptr_decoded pick;
	struct bch_dev *ca;
	struct bio *bio;

	if (c->opts.nochanges)
		return;

	btree_node_io_lock(b);
	mutex_lock(&c->verify_lock);

	n_ondisk = c->verify_ondisk;
	n_sorted = c->verify_data->data;
	n_inmemory = b->data;

	bkey_copy(&v->key, &b->key);
	v->written	= 0;
	v->level	= b->level;
	v->btree_id	= b->btree_id;
	bch2_btree_keys_init(v, &c->expensive_debug_checks);

	if (bch2_btree_pick_ptr(c, b, NULL, &pick) <= 0)
		goto out;

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
	if (!bch2_dev_get_ioref(ca, READ))
		goto out;

	bio = bio_alloc_bioset(GFP_NOIO,
			buf_pages(n_sorted, btree_bytes(c)),
			&c->btree_bio);
	bio_set_dev(bio, ca->disk_sb.bdev);
	bio->bi_opf		= REQ_OP_READ|REQ_META;
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_iter.bi_size	= btree_bytes(c);
	bch2_bio_map(bio, n_sorted);

	submit_bio_wait(bio);

	bio_put(bio);
	percpu_ref_put(&ca->io_ref);

	memcpy(n_ondisk, n_sorted, btree_bytes(c));

	if (bch2_btree_node_read_done(c, v, false))
		goto out;

	n_sorted = c->verify_data->data;
	sorted = &n_sorted->keys;
	inmemory = &n_inmemory->keys;

	if (inmemory->u64s != sorted->u64s ||
	    memcmp(inmemory->start,
		   sorted->start,
		   vstruct_end(inmemory) - (void *) inmemory->start)) {
		unsigned offset = 0, sectors;
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		bch2_dump_bset(b, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		bch2_dump_bset(v, sorted, 0);

		while (offset < b->written) {
			if (!offset) {
				i = &n_ondisk->keys;
				sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
					c->block_bits;
			} else {
				struct btree_node_entry *bne =
					(void *) n_ondisk + (offset << 9);
				i = &bne->keys;

				sectors = vstruct_blocks(bne, c->block_bits) <<
					c->block_bits;
			}

			printk(KERN_ERR "*** on disk block %u:\n", offset);
			bch2_dump_bset(b, i, offset);

			offset += sectors;
		}

		printk(KERN_ERR "*** block %u/%u not written\n",
		       offset >> c->block_bits, btree_blocks(c));

		for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
			if (inmemory->_data[j] != sorted->_data[j])
				break;

		printk(KERN_ERR "b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}
out:
	mutex_unlock(&c->verify_lock);
	btree_node_io_unlock(b);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: bch_fs refcounting */

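/*
 * State for a debugfs dump file, carried across read() calls: output is
 * formatted into buf and copied out to the user buffer a chunk at a time,
 * with the btree position saved in @from so the next read() can resume.
 */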
struct dump_iter {
	struct bpos		from;
	struct bch_fs		*c;
	enum btree_id		id;

	char			buf[PAGE_SIZE];
	size_t			bytes;	/* what's currently in buf */

	char __user		*ubuf;	/* destination user buffer */
	size_t			size;	/* size of requested read */
	ssize_t			ret;	/* bytes read so far */
};

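/*
 * Copy as much of the formatted output in i->buf to userspace as the
 * remaining read size allows, and shift any leftover bytes to the front of
 * the buffer.
 */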
static int flush_buf(struct dump_iter *i)
{
	if (i->bytes) {
		size_t bytes = min(i->bytes, i->size);

		/* copy_to_user() returns the number of bytes it couldn't copy */
		if (copy_to_user(i->ubuf, i->buf, bytes))
			return -EFAULT;

		i->ret	 += bytes;
		i->ubuf	 += bytes;
		i->size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);
	}

	return 0;
}

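/*
 * Open a per-btree debugfs file: allocate a dump_iter and recover the
 * owning bch_fs from the btree_debug entry stashed in i_private.
 */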
static int bch2_dump_open(struct inode *inode, struct file *file)
{
	struct btree_debug *bd = inode->i_private;
	struct dump_iter *i;

	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->from	= POS_MIN;
	i->c	= container_of(bd, struct bch_fs, btree_debug[bd->id]);
	i->id	= bd->id;

	return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

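/*
 * Read method for the per-btree dump file: iterate from the saved position,
 * pretty-print each key and value, and leave i->from pointing past the last
 * key emitted so the next read() resumes there.
 */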
static ssize_t bch2_read_btree(struct file *file, char __user *buf,
			       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct btree_iter iter;
	struct bkey_s_c k;
	int err;

	i->ubuf = buf;
	i->size = size;
	i->ret	= 0;

	err = flush_buf(i);
	if (err)
		return err;

	if (!i->size)
		return i->ret;

	bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);
	k = bch2_btree_iter_peek(&iter);

	while (k.k && !(err = btree_iter_err(k))) {
		bch2_bkey_val_to_text(&PBUF(i->buf), i->c,
				      bkey_type(0, i->id), k);
		i->bytes = strlen(i->buf);
		BUG_ON(i->bytes >= PAGE_SIZE);
		i->buf[i->bytes] = '\n';
		i->bytes++;

		k = bch2_btree_iter_next(&iter);
		i->from = iter.pos;

		err = flush_buf(i);
		if (err)
			break;

		if (!i->size)
			break;
	}
	bch2_btree_iter_unlock(&iter);

	return err < 0 ? err : i->ret;
}

static const struct file_operations btree_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree,
};

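/*
 * Read method for the "-formats" file: walk the btree node by node and print
 * each node's summary via bch2_btree_node_to_text().
 */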
static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct btree_iter iter;
	struct btree *b;
	int err;

	i->ubuf = buf;
	i->size = size;
	i->ret	= 0;

	err = flush_buf(i);
	if (err)
		return err;

	if (!i->size || !bkey_cmp(POS_MAX, i->from))
		return i->ret;

	for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) {
		bch2_btree_node_to_text(&PBUF(i->buf), i->c, b);
		i->bytes = strlen(i->buf);
		err = flush_buf(i);
		if (err)
			break;

		/*
		 * can't easily correctly restart a btree node traversal across
		 * all nodes, meh
		 */
		i->from = bkey_cmp(POS_MAX, b->key.k.p)
			? bkey_successor(b->key.k.p)
			: b->key.k.p;

		if (!i->size)
			break;
	}
	bch2_btree_iter_unlock(&iter);

	return err < 0 ? err : i->ret;
}

static const struct file_operations btree_format_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree_formats,
};

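/*
 * Read method for the "-bfloat-failed" file: for each key, print a header
 * when entering a new node and then the auxiliary search tree (bkey_float)
 * information for that key via bch2_bfloat_to_text().
 */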
static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct btree *prev_node = NULL;
	int err;

	i->ubuf = buf;
	i->size = size;
	i->ret	= 0;

	err = flush_buf(i);
	if (err)
		return err;

	if (!i->size)
		return i->ret;

	bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);

	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(err = btree_iter_err(k))) {
		struct btree_iter_level *l = &iter.l[0];
		struct bkey_packed *_k =
			bch2_btree_node_iter_peek(&l->iter, l->b);

		if (l->b != prev_node) {
			bch2_btree_node_to_text(&PBUF(i->buf), i->c, l->b);
			i->bytes = strlen(i->buf);
			err = flush_buf(i);
			if (err)
				break;
		}
		prev_node = l->b;

		bch2_bfloat_to_text(&PBUF(i->buf), l->b, _k);
		i->bytes = strlen(i->buf);
		err = flush_buf(i);
		if (err)
			break;

		bch2_btree_iter_next(&iter);
		i->from = iter.pos;

		err = flush_buf(i);
		if (err)
			break;

		if (!i->size)
			break;
	}
	bch2_btree_iter_unlock(&iter);

	return err < 0 ? err : i->ret;
}

static const struct file_operations bfloat_failed_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_bfloat_failed,
};

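/*
 * Per-filesystem debugfs setup/teardown: a directory named after the
 * filesystem's user UUID, containing one set of dump files per btree.
 */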
void bch2_fs_debug_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove_recursive(c->debug);
}

void bch2_fs_debug_init(struct bch_fs *c)
{
	struct btree_debug *bd;
	char name[100];

	if (IS_ERR_OR_NULL(bch_debug))
		return;

	snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
	c->debug = debugfs_create_dir(name, bch_debug);
	if (IS_ERR_OR_NULL(c->debug))
		return;

	for (bd = c->btree_debug;
	     bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
	     bd++) {
		bd->id = bd - c->btree_debug;
		bd->btree = debugfs_create_file(bch2_btree_ids[bd->id],
						0400, c->debug, bd,
						&btree_debug_ops);

		snprintf(name, sizeof(name), "%s-formats",
			 bch2_btree_ids[bd->id]);

		bd->btree_format = debugfs_create_file(name, 0400, c->debug, bd,
						       &btree_format_debug_ops);

		snprintf(name, sizeof(name), "%s-bfloat-failed",
			 bch2_btree_ids[bd->id]);

		bd->failed = debugfs_create_file(name, 0400, c->debug, bd,
						 &bfloat_failed_debug_ops);
	}
}

#endif

void bch2_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(bch_debug))
		debugfs_remove_recursive(bch_debug);
}

int __init bch2_debug_init(void)
{
	int ret = 0;

	bch_debug = debugfs_create_dir("bcachefs", NULL);
	return ret;
}