#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

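/* Empty out the test btrees: delete everything in the ranges the tests use. */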
static void delete_test_keys(struct bch_fs *c)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);

        ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);
}

/* unit tests */

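/*
 * Insert a single key, then delete at the same iterator position twice: the
 * second delete exercises deleting a key that has already been deleted.
 */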
static void test_delete(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);

        bch2_trans_init(&trans, c);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
                                   BTREE_ITER_INTENT);

        ret = bch2_btree_iter_traverse(iter);
        BUG_ON(ret);

        bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
        ret = bch2_trans_commit(&trans, NULL, NULL, 0);
        BUG_ON(ret);

        pr_info("deleting once");
        ret = bch2_btree_delete_at(&trans, iter, 0);
        BUG_ON(ret);

        pr_info("deleting twice");
        ret = bch2_btree_delete_at(&trans, iter, 0);
        BUG_ON(ret);

        bch2_trans_exit(&trans);
}

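/*
 * Like test_delete(), but flush journal pins first so the btree node
 * containing the key has been written out before the key is deleted.
 */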
static void test_delete_written(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);

        bch2_trans_init(&trans, c);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
                                   BTREE_ITER_INTENT);

        ret = bch2_btree_iter_traverse(iter);
        BUG_ON(ret);

        bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
        ret = bch2_trans_commit(&trans, NULL, NULL, 0);
        BUG_ON(ret);

        bch2_journal_flush_all_pins(&c->journal);

        ret = bch2_btree_delete_at(&trans, iter, 0);
        BUG_ON(ret);

        bch2_trans_exit(&trans);
}

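/*
 * Insert nr keys at offsets 0..nr-1, then check that forward and backward
 * iteration return them in order with no keys missing or repeated.
 */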
static void test_iterate(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i;

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
                BUG_ON(k.k->p.offset != i++);
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
                BUG_ON(k.k->p.offset != --i);
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i);
}

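/*
 * Same as test_iterate(), but with extents: insert contiguous 8-sector
 * extents covering [0, nr) and check that both iteration directions see
 * them back to back.
 */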
static void test_iterate_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test extents");

        for (i = 0; i < nr; i += 8) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 8;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
                BUG_ON(bkey_start_offset(k.k) != i);
                i = k.k->p.offset;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
                BUG_ON(k.k->p.offset != i);
                i = bkey_start_offset(k.k);
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i);
}

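/*
 * Insert keys at even offsets only; normal iteration should return just the
 * inserted keys, while iterating by slots (BTREE_ITER_SLOTS) should also
 * return deleted/empty slots for the odd offsets in between.
 */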
static void test_iterate_slots(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i * 2;

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
                BUG_ON(k.k->p.offset != i);
                i += 2;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr * 2);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0),
                           BTREE_ITER_SLOTS, k) {
                BUG_ON(bkey_deleted(k.k) != (i & 1));
                BUG_ON(k.k->p.offset != i++);

                if (i == nr * 2)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

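/*
 * Extents version of test_iterate_slots(): insert an 8-sector extent every
 * 16 sectors, then check that slot iteration alternates between the inserted
 * extents and the 8-sector holes between them.
 */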
static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i += 16) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 16;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
                BUG_ON(bkey_start_offset(k.k) != i + 8);
                BUG_ON(k.k->size != 8);
                i += 16;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0),
                           BTREE_ITER_SLOTS, k) {
                BUG_ON(bkey_deleted(k.k) != !(i % 16));

                BUG_ON(bkey_start_offset(k.k) != i);
                BUG_ON(k.k->size != 8);
                i = k.k->p.offset;

                if (i == nr)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
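/* Peeking past the end of an empty btree should return no key, repeatably: */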
static void test_peek_end(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        bch2_btree_iter_unlock(&iter);
}

static void test_peek_end_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        bch2_btree_iter_unlock(&iter);
}

/* extent unit tests */

u64 test_version;

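/* Insert a cookie extent spanning [start, end), with an increasing version: */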
static void insert_test_extent(struct bch_fs *c,
                               u64 start, u64 end)
{
        struct bkey_i_cookie k;
        int ret;

        //pr_info("inserting %llu-%llu v %llu", start, end, test_version);

        bkey_cookie_init(&k.k_i);
        k.k_i.k.p.offset = end;
        k.k_i.k.size = end - start;
        k.k_i.k.version.lo = test_version++;

        ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                NULL, NULL, 0);
        BUG_ON(ret);
}

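/*
 * Insert two overlapping extents, letting the second overwrite (part of) the
 * first, then clean up.
 */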
static void __test_extent_overwrite(struct bch_fs *c,
                                    u64 e1_start, u64 e1_end,
                                    u64 e2_start, u64 e2_end)
{
        insert_test_extent(c, e1_start, e1_end);
        insert_test_extent(c, e2_start, e2_end);

        delete_test_keys(c);
}

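/* Overwrite scenarios: the front, back, middle, or all of an existing extent: */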
static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 0, 32);
        __test_extent_overwrite(c, 8, 64, 0, 32);
}

static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 32, 64);
        __test_extent_overwrite(c, 0, 64, 32, 72);
}

static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 32, 40);
}

static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 32, 64,  0,  64);
        __test_extent_overwrite(c, 32, 64,  0, 128);
        __test_extent_overwrite(c, 32, 64, 32,  64);
        __test_extent_overwrite(c, 32, 64, 32, 128);
}

/* perf tests */

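/* Generate a full 64 bits of randomness for test key offsets: */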
static u64 test_rand(void)
{
        u64 v;
#if 0
        v = prandom_u32();
#else
        prandom_bytes(&v, sizeof(v));
#endif
        return v;
}

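/* Insert nr keys at random offsets: */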
static void rand_insert(struct bch_fs *c, u64 nr)
{
        struct bkey_i_cookie k;
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                bkey_cookie_init(&k.k_i);
                k.k.p.offset = test_rand();

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }
}

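/* Look up nr random positions: */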
static void rand_lookup(struct bch_fs *c, u64 nr)
{
        u64 i;

        for (i = 0; i < nr; i++) {
                struct btree_iter iter;
                struct bkey_s_c k;

                bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
                                     POS(0, test_rand()), 0);

                k = bch2_btree_iter_peek(&iter);
                bch2_btree_iter_unlock(&iter);
        }
}

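/* Random lookups, with every fourth lookup that finds a key overwriting it: */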
static void rand_mixed(struct bch_fs *c, u64 nr)
{
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                struct btree_trans trans;
                struct btree_iter *iter;
                struct bkey_s_c k;

                bch2_trans_init(&trans, c);

                iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
                                           POS(0, test_rand()), 0);

                k = bch2_btree_iter_peek(iter);

                if (!(i & 3) && k.k) {
                        struct bkey_i_cookie k;

                        bkey_cookie_init(&k.k_i);
                        k.k.p = iter->pos;

                        bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
                        ret = bch2_trans_commit(&trans, NULL, NULL, 0);
                        BUG_ON(ret);
                }

                bch2_trans_exit(&trans);
        }
}

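/* Delete at nr random offsets by inserting empty (deleted) keys: */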
static void rand_delete(struct bch_fs *c, u64 nr)
{
        struct bkey_i k;
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                bkey_init(&k.k);
                k.k.p.offset = test_rand();

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }
}

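/* Insert nr keys sequentially, one per slot starting from POS_MIN: */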
static void seq_insert(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
        int ret;
        u64 i = 0;

        bkey_cookie_init(&insert.k_i);

        bch2_trans_init(&trans, c);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
                                   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

        for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
                insert.k.p = iter->pos;

                bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
                ret = bch2_trans_commit(&trans, NULL, NULL, 0);
                BUG_ON(ret);

                if (++i == nr)
                        break;
        }
        bch2_trans_exit(&trans);
}

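/* Walk every key in order, without doing anything with them: */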
static void seq_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
                ;
        bch2_btree_iter_unlock(&iter);
}

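/* Rewrite every existing key in place: */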
static void seq_overwrite(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        int ret;

        bch2_trans_init(&trans, c);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
                                   BTREE_ITER_INTENT);

        for_each_btree_key_continue(iter, 0, k) {
                struct bkey_i_cookie u;

                bkey_reassemble(&u.k_i, k);

                bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
                ret = bch2_trans_commit(&trans, NULL, NULL, 0);
                BUG_ON(ret);
        }
        bch2_trans_exit(&trans);
}

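/* Delete everything in the test range with a single range delete: */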
static void seq_delete(struct bch_fs *c, u64 nr)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);
}

typedef void (*perf_test_fn)(struct bch_fs *, u64);

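/*
 * State shared by the threads running a single perf test: the test to run,
 * counters for synchronizing start and completion, and start/finish times.
 */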
struct test_job {
        struct bch_fs                   *c;
        u64                             nr;
        unsigned                        nr_threads;
        perf_test_fn                    fn;

        atomic_t                        ready;
        wait_queue_head_t               ready_wait;

        atomic_t                        done;
        struct completion               done_completion;

        u64                             start;
        u64                             finish;
};

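/*
 * Each worker waits until all threads are ready; the last thread to become
 * ready records the start time, and the last to finish records the finish
 * time and signals completion.
 */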
static int btree_perf_test_thread(void *data)
{
        struct test_job *j = data;

        if (atomic_dec_and_test(&j->ready)) {
                wake_up(&j->ready_wait);
                j->start = sched_clock();
        } else {
                wait_event(j->ready_wait, !atomic_read(&j->ready));
        }

        j->fn(j->c, j->nr / j->nr_threads);

        if (atomic_dec_and_test(&j->done)) {
                j->finish = sched_clock();
                complete(&j->done_completion);
        }

        return 0;
}

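/*
 * Look up the named test, run it on nr_threads threads with the work split
 * between them, and report elapsed time, time per iteration and throughput.
 */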
void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
                          u64 nr, unsigned nr_threads)
{
        struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
        char name_buf[20], nr_buf[20], per_sec_buf[20];
        unsigned i;
        u64 time;

        atomic_set(&j.ready, nr_threads);
        init_waitqueue_head(&j.ready_wait);

        atomic_set(&j.done, nr_threads);
        init_completion(&j.done_completion);

#define perf_test(_test)                                \
        if (!strcmp(testname, #_test)) j.fn = _test

        perf_test(rand_insert);
        perf_test(rand_lookup);
        perf_test(rand_mixed);
        perf_test(rand_delete);

        perf_test(seq_insert);
        perf_test(seq_lookup);
        perf_test(seq_overwrite);
        perf_test(seq_delete);

        /* a unit test, not a perf test: */
        perf_test(test_delete);
        perf_test(test_delete_written);
        perf_test(test_iterate);
        perf_test(test_iterate_extents);
        perf_test(test_iterate_slots);
        perf_test(test_iterate_slots_extents);
        perf_test(test_peek_end);
        perf_test(test_peek_end_extents);

        perf_test(test_extent_overwrite_front);
        perf_test(test_extent_overwrite_back);
        perf_test(test_extent_overwrite_middle);
        perf_test(test_extent_overwrite_all);

        if (!j.fn) {
                pr_err("unknown test %s", testname);
                return;
        }

        //pr_info("running test %s:", testname);

        if (nr_threads == 1)
                btree_perf_test_thread(&j);
        else
                for (i = 0; i < nr_threads; i++)
                        kthread_run(btree_perf_test_thread, &j,
                                    "bcachefs perf test[%u]", i);

        while (wait_for_completion_interruptible(&j.done_completion))
                ;

        time = j.finish - j.start;

        scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
        bch2_hprint(&PBUF(nr_buf), nr);
        bch2_hprint(&PBUF(per_sec_buf), nr * NSEC_PER_SEC / time);
        printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
                name_buf, nr_buf, nr_threads,
                time / NSEC_PER_SEC,
                time * nr_threads / nr,
                per_sec_buf);
}

#endif /* CONFIG_BCACHEFS_TESTS */