#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

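/*
 * Reset helper: empty the extents and dirents btrees so each test starts from
 * a known state.
 */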
static void delete_test_keys(struct bch_fs *c)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);

        ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);
}

/* unit tests */

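/*
 * Insert a single cookie key, then delete at the same iterator position
 * twice, checking that deleting an already-deleted slot doesn't error.
 */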
static void test_delete(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);

        bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
                             BTREE_ITER_INTENT);

        ret = bch2_btree_iter_traverse(&iter);
        BUG_ON(ret);

        ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                   BTREE_INSERT_ENTRY(&iter, &k.k_i));
        BUG_ON(ret);

        pr_info("deleting once");
        ret = bch2_btree_delete_at(&iter, 0);
        BUG_ON(ret);

        pr_info("deleting twice");
        ret = bch2_btree_delete_at(&iter, 0);
        BUG_ON(ret);

        bch2_btree_iter_unlock(&iter);
}

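/*
 * Like test_delete(), but flush all journal pins first so the inserted key
 * has been written out to the btree before it's deleted.
 */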
static void test_delete_written(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);

        bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
                             BTREE_ITER_INTENT);

        ret = bch2_btree_iter_traverse(&iter);
        BUG_ON(ret);

        ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                   BTREE_INSERT_ENTRY(&iter, &k.k_i));
        BUG_ON(ret);

        bch2_journal_flush_all_pins(&c->journal);

        ret = bch2_btree_delete_at(&iter, 0);
        BUG_ON(ret);

        bch2_btree_iter_unlock(&iter);
}

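/*
 * Insert nr keys at consecutive offsets, then check that forward and backward
 * iteration visits each offset exactly once.
 */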
static void test_iterate(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i;

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
                BUG_ON(k.k->p.offset != i++);
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
                BUG_ON(k.k->p.offset != --i);
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i);
}

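/*
 * Extents version of test_iterate(): insert contiguous 8-sector extents and
 * check that forward and backward iteration sees them without gaps.
 */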
static void test_iterate_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test extents");

        for (i = 0; i < nr; i += 8) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 8;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
                BUG_ON(bkey_start_offset(k.k) != i);
                i = k.k->p.offset;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
                BUG_ON(k.k->p.offset != i);
                i = bkey_start_offset(k.k);
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i);
}

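/*
 * Insert keys at every other offset, then iterate both normally (keys only)
 * and by slots (every position, including the empty slots in between).
 */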
static void test_iterate_slots(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i * 2;

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
                BUG_ON(k.k->p.offset != i);
                i += 2;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr * 2);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0),
                           BTREE_ITER_SLOTS, k) {
                BUG_ON(bkey_deleted(k.k) != (i & 1));
                BUG_ON(k.k->p.offset != i++);

                if (i == nr * 2)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

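/*
 * Extents version of test_iterate_slots(): each 16-sector window gets an
 * 8-sector extent, so slot iteration should alternate between holes and
 * extents.
 */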
static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i += 16) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 16;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
                BUG_ON(bkey_start_offset(k.k) != i + 8);
                BUG_ON(k.k->size != 8);
                i += 16;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0),
                           BTREE_ITER_SLOTS, k) {
                BUG_ON(bkey_deleted(k.k) != !(i % 16));

                BUG_ON(bkey_start_offset(k.k) != i);
                BUG_ON(k.k->size != 8);
                i = k.k->p.offset;

                if (i == nr)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
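/* These expect an empty btree: peeking from POS_MIN should repeatedly return no key. */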
static void test_peek_end(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        bch2_btree_iter_unlock(&iter);
}

static void test_peek_end_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        bch2_btree_iter_unlock(&iter);
}

/* extent unit tests */

u64 test_version;

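/*
 * Insert a cookie extent spanning [start, end), tagged with a monotonically
 * increasing version number.
 */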
static void insert_test_extent(struct bch_fs *c,
                               u64 start, u64 end)
{
        struct bkey_i_cookie k;
        int ret;

        //pr_info("inserting %llu-%llu v %llu", start, end, test_version);

        bkey_cookie_init(&k.k_i);
        k.k_i.k.p.offset = end;
        k.k_i.k.size = end - start;
        k.k_i.k.version.lo = test_version++;

        ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                NULL, NULL, 0);
        BUG_ON(ret);
}

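/*
 * Insert two overlapping extents and let the btree code resolve the overlap;
 * the test passes if neither insert (nor the cleanup) fails.
 */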
static void __test_extent_overwrite(struct bch_fs *c,
                                    u64 e1_start, u64 e1_end,
                                    u64 e2_start, u64 e2_end)
{
        insert_test_extent(c, e1_start, e1_end);
        insert_test_extent(c, e2_start, e2_end);

        delete_test_keys(c);
}

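/*
 * The wrappers below cover the different overlap cases: the second extent
 * overwrites the front, the back, the middle, or all of the first.
 */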
static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 0, 32);
        __test_extent_overwrite(c, 8, 64, 0, 32);
}

static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 32, 64);
        __test_extent_overwrite(c, 0, 64, 32, 72);
}

static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 32, 40);
}

static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 32, 64,  0,  64);
        __test_extent_overwrite(c, 32, 64,  0, 128);
        __test_extent_overwrite(c, 32, 64, 32,  64);
        __test_extent_overwrite(c, 32, 64, 32, 128);
}

/* perf tests */

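/* 64 bits of randomness for the random-key perf tests. */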
static u64 test_rand(void)
{
        u64 v;
#if 0
        v = prandom_u32();
#else
        prandom_bytes(&v, sizeof(v));
#endif
        return v;
}

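/* Insert nr cookie keys at random offsets. */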
static void rand_insert(struct bch_fs *c, u64 nr)
{
        struct bkey_i_cookie k;
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                bkey_cookie_init(&k.k_i);
                k.k.p.offset = test_rand();

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }
}

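/* Do nr random point lookups, discarding the results. */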
static void rand_lookup(struct bch_fs *c, u64 nr)
{
        u64 i;

        for (i = 0; i < nr; i++) {
                struct btree_iter iter;
                struct bkey_s_c k;

                bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
                                     POS(0, test_rand()), 0);

                k = bch2_btree_iter_peek(&iter);
                bch2_btree_iter_unlock(&iter);
        }
}

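/*
 * Random lookups, with every fourth one (when it finds a key) followed by an
 * overwrite at the iterator's position.
 */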
static void rand_mixed(struct bch_fs *c, u64 nr)
{
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                struct btree_iter iter;
                struct bkey_s_c k;

                bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
                                     POS(0, test_rand()), 0);

                k = bch2_btree_iter_peek(&iter);

                if (!(i & 3) && k.k) {
                        struct bkey_i_cookie k;

                        bkey_cookie_init(&k.k_i);
                        k.k.p = iter.pos;

                        ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                                   BTREE_INSERT_ENTRY(&iter, &k.k_i));
                        BUG_ON(ret);
                }

                bch2_btree_iter_unlock(&iter);
        }
}

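/* Delete at nr random offsets by inserting keys initialized as deleted. */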
static void rand_delete(struct bch_fs *c, u64 nr)
{
        struct bkey_i k;
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                bkey_init(&k.k);
                k.k.p.offset = test_rand();

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }
}

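/* Fill the first nr slots of the dirents btree in order. */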
static void seq_insert(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
        int ret;
        u64 i = 0;

        bkey_cookie_init(&insert.k_i);

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
                           BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
                insert.k.p = iter.pos;

                ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                BTREE_INSERT_ENTRY(&iter, &insert.k_i));
                BUG_ON(ret);

                if (++i == nr)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

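/* Walk every key in the dirents btree, discarding the results. */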
static void seq_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
                ;
        bch2_btree_iter_unlock(&iter);
}

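/* Rewrite every existing key in place with a copy of itself. */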
static void seq_overwrite(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
                           BTREE_ITER_INTENT, k) {
                struct bkey_i_cookie u;

                bkey_reassemble(&u.k_i, k);

                ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                           BTREE_INSERT_ENTRY(&iter, &u.k_i));
                BUG_ON(ret);
        }
        bch2_btree_iter_unlock(&iter);
}

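/* Delete everything in the dirents btree with a single range delete. */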
static void seq_delete(struct bch_fs *c, u64 nr)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);
}

typedef void (*perf_test_fn)(struct bch_fs *, u64);

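/*
 * Shared state for one multi-threaded test run: the last thread to become
 * ready records the start time and wakes the others; the last thread to
 * finish records the end time and signals completion.
 */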
struct test_job {
        struct bch_fs                   *c;
        u64                             nr;
        unsigned                        nr_threads;
        perf_test_fn                    fn;

        atomic_t                        ready;
        wait_queue_head_t               ready_wait;

        atomic_t                        done;
        struct completion               done_completion;

        u64                             start;
        u64                             finish;
};

static int btree_perf_test_thread(void *data)
{
        struct test_job *j = data;

        if (atomic_dec_and_test(&j->ready)) {
                wake_up(&j->ready_wait);
                j->start = sched_clock();
        } else {
                wait_event(j->ready_wait, !atomic_read(&j->ready));
        }

        j->fn(j->c, j->nr / j->nr_threads);

        if (atomic_dec_and_test(&j->done)) {
                j->finish = sched_clock();
                complete(&j->done_completion);
        }

        return 0;
}

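/*
 * Entry point: look up the named test, run it across nr_threads threads
 * (splitting nr iterations between them), and report total runtime, time per
 * iteration and iterations per second.
 */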
void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
                          u64 nr, unsigned nr_threads)
{
        struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
        char name_buf[20], nr_buf[20], per_sec_buf[20];
        unsigned i;
        u64 time;

        atomic_set(&j.ready, nr_threads);
        init_waitqueue_head(&j.ready_wait);

        atomic_set(&j.done, nr_threads);
        init_completion(&j.done_completion);

#define perf_test(_test)                                \
        if (!strcmp(testname, #_test)) j.fn = _test

        perf_test(rand_insert);
        perf_test(rand_lookup);
        perf_test(rand_mixed);
        perf_test(rand_delete);

        perf_test(seq_insert);
        perf_test(seq_lookup);
        perf_test(seq_overwrite);
        perf_test(seq_delete);

        /* unit tests, not perf tests: */
        perf_test(test_delete);
        perf_test(test_delete_written);
        perf_test(test_iterate);
        perf_test(test_iterate_extents);
        perf_test(test_iterate_slots);
        perf_test(test_iterate_slots_extents);
        perf_test(test_peek_end);
        perf_test(test_peek_end_extents);

        perf_test(test_extent_overwrite_front);
        perf_test(test_extent_overwrite_back);
        perf_test(test_extent_overwrite_middle);
        perf_test(test_extent_overwrite_all);

        if (!j.fn) {
                pr_err("unknown test %s", testname);
                return;
        }

        //pr_info("running test %s:", testname);

        if (nr_threads == 1)
                btree_perf_test_thread(&j);
        else
                for (i = 0; i < nr_threads; i++)
                        kthread_run(btree_perf_test_thread, &j,
                                    "bcachefs perf test[%u]", i);

        while (wait_for_completion_interruptible(&j.done_completion))
                ;

        time = j.finish - j.start;

        scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
        bch2_hprint(nr_buf, nr);
        bch2_hprint(per_sec_buf, nr * NSEC_PER_SEC / time);
        printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
                name_buf, nr_buf, nr_threads,
                time / NSEC_PER_SEC,
                time * nr_threads / nr,
                per_sec_buf);
}

#endif /* CONFIG_BCACHEFS_TESTS */