// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

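/*
 * Test helper: wipe the extents and xattrs btrees (all test keys live in
 * inode 0) so each test starts from an empty keyspace.
 */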
static void delete_test_keys(struct bch_fs *c)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_extents,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);

        ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);
}

/* unit tests */

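/*
 * Insert a single cookie key, then delete it twice through the same iterator:
 * the second delete exercises deleting at a position that no longer has a key.
 */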
static int test_delete(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k.p.snapshot = U32_MAX;

        bch2_trans_init(&trans, c, 0, 0);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
                                   BTREE_ITER_INTENT);

        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(iter) ?:
                bch2_trans_update(&trans, iter, &k.k_i, 0));
        if (ret) {
                bch_err(c, "update error in test_delete: %i", ret);
                goto err;
        }

        pr_info("deleting once");
        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(iter) ?:
                bch2_btree_delete_at(&trans, iter, 0));
        if (ret) {
                bch_err(c, "delete error (first) in test_delete: %i", ret);
                goto err;
        }

        pr_info("deleting twice");
        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(iter) ?:
                bch2_btree_delete_at(&trans, iter, 0));
        if (ret) {
                bch_err(c, "delete error (second) in test_delete: %i", ret);
                goto err;
        }
err:
        bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
        return ret;
}

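/*
 * Like test_delete(), but flush the journal so the update has been written
 * out before deleting the key.
 */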
static int test_delete_written(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k.p.snapshot = U32_MAX;

        bch2_trans_init(&trans, c, 0, 0);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
                                   BTREE_ITER_INTENT);

        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(iter) ?:
                bch2_trans_update(&trans, iter, &k.k_i, 0));
        if (ret) {
                bch_err(c, "update error in test_delete_written: %i", ret);
                goto err;
        }

        bch2_trans_unlock(&trans);
        bch2_journal_flush_all_pins(&c->journal);

        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(iter) ?:
                bch2_btree_delete_at(&trans, iter, 0));
        if (ret) {
                bch_err(c, "delete error in test_delete_written: %i", ret);
                goto err;
        }
err:
        bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
        return ret;
}

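/*
 * Insert nr cookie keys at consecutive offsets, then verify that forward and
 * backward iteration visits each key exactly once, in order.
 */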
static int test_iterate(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter = NULL;
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i;
                k.k.p.snapshot = U32_MAX;

                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
                           POS_MIN, 0, k, ret) {
                if (k.k->p.inode)
                        break;

                BUG_ON(k.k->p.offset != i++);
        }

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k))
                BUG_ON(k.k->p.offset != --i);

        BUG_ON(i);
err:
        bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
        return ret;
}

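/*
 * Same as test_iterate(), but with 8 sector extents in the extents btree,
 * checking that consecutive extents are contiguous in both directions.
 */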
static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter = NULL;
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test extents");

        for (i = 0; i < nr; i += 8) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 8;
                k.k.p.snapshot = U32_MAX;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate_extents: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_extents,
                           POS_MIN, 0, k, ret) {
                BUG_ON(bkey_start_offset(k.k) != i);
                i = k.k->p.offset;
        }

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k)) {
                BUG_ON(k.k->p.offset != i);
                i = bkey_start_offset(k.k);
        }

        BUG_ON(i);
err:
        bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
        return ret;
}

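/*
 * Insert keys at every other offset, then iterate both normally (keys only)
 * and with BTREE_ITER_SLOTS, which also returns the empty slots in between
 * as deleted keys.
 */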
static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i * 2;
                k.k.p.snapshot = U32_MAX;

                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate_slots: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
                           0, k, ret) {
                if (k.k->p.inode)
                        break;

                BUG_ON(k.k->p.offset != i);
                i += 2;
        }
        bch2_trans_iter_put(&trans, iter);

        BUG_ON(i != nr * 2);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
                           BTREE_ITER_SLOTS, k, ret) {
                BUG_ON(k.k->p.offset != i);
                BUG_ON(bkey_deleted(k.k) != (i & 1));

                i++;
                if (i == nr * 2)
                        break;
        }
        bch2_trans_iter_put(&trans, iter);
err:
        bch2_trans_exit(&trans);
        return ret;
}

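/*
 * Extents version of test_iterate_slots(): 8 sector extents with 8 sector
 * gaps, iterated both normally and by slots (where the gaps show up as
 * deleted keys).
 */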
static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i += 16) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 16;
                k.k.p.snapshot = U32_MAX;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
                           0, k, ret) {
                BUG_ON(bkey_start_offset(k.k) != i + 8);
                BUG_ON(k.k->size != 8);
                i += 16;
        }
        bch2_trans_iter_put(&trans, iter);

        BUG_ON(i != nr);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
                           BTREE_ITER_SLOTS, k, ret) {
                BUG_ON(bkey_deleted(k.k) != !(i % 16));

                BUG_ON(bkey_start_offset(k.k) != i);
                BUG_ON(k.k->size != 8);
                i = k.k->p.offset;

                if (i == nr)
                        break;
        }
        bch2_trans_iter_put(&trans, iter);
err:
        bch2_trans_exit(&trans);
        return ret;
}

/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
static int test_peek_end(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;

        bch2_trans_init(&trans, c, 0, 0);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);

        k = bch2_btree_iter_peek(iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(iter);
        BUG_ON(k.k);

        bch2_trans_iter_put(&trans, iter);

        bch2_trans_exit(&trans);
        return 0;
}

static int test_peek_end_extents(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;

        bch2_trans_init(&trans, c, 0, 0);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN, 0);

        k = bch2_btree_iter_peek(iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(iter);
        BUG_ON(k.k);

        bch2_trans_iter_put(&trans, iter);

        bch2_trans_exit(&trans);
        return 0;
}

/* extent unit tests */

u64 test_version;

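/* Insert a [start, end) cookie extent with a monotonically increasing version */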
static int insert_test_extent(struct bch_fs *c,
                              u64 start, u64 end)
{
        struct bkey_i_cookie k;
        int ret;

        //pr_info("inserting %llu-%llu v %llu", start, end, test_version);

        bkey_cookie_init(&k.k_i);
        k.k_i.k.p.offset = end;
        k.k_i.k.p.snapshot = U32_MAX;
        k.k_i.k.size = end - start;
        k.k_i.k.version.lo = test_version++;

        ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                NULL, NULL, 0);
        if (ret)
                bch_err(c, "insert error in insert_test_extent: %i", ret);
        return ret;
}

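/*
 * Insert two overlapping extents, letting the second overwrite part or all of
 * the first; the interesting work happens in the extent update path.
 */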
static int __test_extent_overwrite(struct bch_fs *c,
                                    u64 e1_start, u64 e1_end,
                                    u64 e2_start, u64 e2_end)
{
        int ret;

        ret   = insert_test_extent(c, e1_start, e1_end) ?:
                insert_test_extent(c, e2_start, e2_end);

        delete_test_keys(c);
        return ret;
}

static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
                __test_extent_overwrite(c, 8, 64, 0, 32);
}

static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
                __test_extent_overwrite(c, 0, 64, 32, 72);
}

static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
        return __test_extent_overwrite(c, 0, 64, 32, 40);
}

static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
                __test_extent_overwrite(c, 32, 64,  0, 128) ?:
                __test_extent_overwrite(c, 32, 64, 32,  64) ?:
                __test_extent_overwrite(c, 32, 64, 32, 128);
}

/* perf tests */

static u64 test_rand(void)
{
        u64 v;
#if 0
        v = prandom_u32();
#else
        prandom_bytes(&v, sizeof(v));
#endif
        return v;
}

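/* Insert nr keys at random offsets, one key per transaction commit */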
static int rand_insert(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct bkey_i_cookie k;
        int ret = 0;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);

        for (i = 0; i < nr; i++) {
                bkey_cookie_init(&k.k_i);
                k.k.p.offset = test_rand();
                k.k.p.snapshot = U32_MAX;

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
                if (ret) {
                        bch_err(c, "error in rand_insert: %i", ret);
                        break;
                }
        }

        bch2_trans_exit(&trans);
        return ret;
}

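/* As rand_insert(), but batch eight random inserts per transaction commit */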
static int rand_insert_multi(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct bkey_i_cookie k[8];
        int ret = 0;
        unsigned j;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);

        for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
                for (j = 0; j < ARRAY_SIZE(k); j++) {
                        bkey_cookie_init(&k[j].k_i);
                        k[j].k.p.offset = test_rand();
                        k[j].k.p.snapshot = U32_MAX;
                }

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
                if (ret) {
                        bch_err(c, "error in rand_insert_multi: %i", ret);
                        break;
                }
        }

        bch2_trans_exit(&trans);
        return ret;
}

static int rand_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        int ret = 0;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);
        iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);

        for (i = 0; i < nr; i++) {
                bch2_btree_iter_set_pos(iter, POS(0, test_rand()));

                k = bch2_btree_iter_peek(iter);
                ret = bkey_err(k);
                if (ret) {
                        bch_err(c, "error in rand_lookup: %i", ret);
                        break;
                }
        }

        bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
        return ret;
}

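/*
 * Random lookups; every fourth iteration, if the lookup found a key,
 * overwrite it in place.
 */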
static int rand_mixed(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        int ret = 0;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);
        iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);

        for (i = 0; i < nr; i++) {
                bch2_btree_iter_set_pos(iter, POS(0, test_rand()));

                k = bch2_btree_iter_peek(iter);
                ret = bkey_err(k);
                if (ret) {
                        bch_err(c, "lookup error in rand_mixed: %i", ret);
                        break;
                }

                if (!(i & 3) && k.k) {
                        struct bkey_i_cookie k;

                        bkey_cookie_init(&k.k_i);
                        k.k.p = iter->pos;

                        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                                bch2_btree_iter_traverse(iter) ?:
                                bch2_trans_update(&trans, iter, &k.k_i, 0));
                        if (ret) {
                                bch_err(c, "update error in rand_mixed: %i", ret);
                                break;
                        }
                }
        }

        bch2_trans_iter_put(&trans, iter);
        bch2_trans_exit(&trans);
        return ret;
}

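/* Delete the first key at or after pos, if any (helper for rand_delete()) */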
static int __do_delete(struct btree_trans *trans, struct bpos pos)
{
        struct btree_iter *iter;
        struct bkey_i delete;
        struct bkey_s_c k;
        int ret = 0;

        iter = bch2_trans_get_iter(trans, BTREE_ID_xattrs, pos,
                                   BTREE_ITER_INTENT);
        k = bch2_btree_iter_peek(iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (!k.k)
                goto err;

        bkey_init(&delete.k);
        delete.k.p = k.k->p;

        ret = bch2_trans_update(trans, iter, &delete, 0);
err:
        bch2_trans_iter_put(trans, iter);
        return ret;
}

static int rand_delete(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        int ret = 0;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);

        for (i = 0; i < nr; i++) {
                struct bpos pos = POS(0, test_rand());

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __do_delete(&trans, pos));
                if (ret) {
                        bch_err(c, "error in rand_delete: %i", ret);
                        break;
                }
        }

        bch2_trans_exit(&trans);
        return ret;
}

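/* Fill the first nr slots of the xattrs btree with cookie keys, in order */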
static int seq_insert(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
        int ret = 0;
        u64 i = 0;

        bkey_cookie_init(&insert.k_i);

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
                           BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
                insert.k.p = iter->pos;

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        bch2_btree_iter_traverse(iter) ?:
                        bch2_trans_update(&trans, iter, &insert.k_i, 0));
                if (ret) {
                        bch_err(c, "error in seq_insert: %i", ret);
                        break;
                }

                if (++i == nr)
                        break;
        }
        bch2_trans_iter_put(&trans, iter);

        bch2_trans_exit(&trans);
        return ret;
}

static int seq_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
                ;
        bch2_trans_iter_put(&trans, iter);

        bch2_trans_exit(&trans);
        return ret;
}

static int seq_overwrite(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
                           BTREE_ITER_INTENT, k, ret) {
                struct bkey_i_cookie u;

                bkey_reassemble(&u.k_i, k);

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        bch2_btree_iter_traverse(iter) ?:
                        bch2_trans_update(&trans, iter, &u.k_i, 0));
                if (ret) {
                        bch_err(c, "error in seq_overwrite: %i", ret);
                        break;
                }
        }
        bch2_trans_iter_put(&trans, iter);

        bch2_trans_exit(&trans);
        return ret;
}

static int seq_delete(struct bch_fs *c, u64 nr)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        if (ret)
                bch_err(c, "error in seq_delete: %i", ret);
        return ret;
}

typedef int (*perf_test_fn)(struct bch_fs *, u64);

struct test_job {
        struct bch_fs                   *c;
        u64                             nr;
        unsigned                        nr_threads;
        perf_test_fn                    fn;

        atomic_t                        ready;
        wait_queue_head_t               ready_wait;

        atomic_t                        done;
        struct completion               done_completion;

        u64                             start;
        u64                             finish;
        int                             ret;
};

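/*
 * Worker thread: the last thread to become ready starts the clock, each
 * thread runs its share of the work, and the last thread to finish stops the
 * clock and signals completion.
 */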
static int btree_perf_test_thread(void *data)
{
        struct test_job *j = data;
        int ret;

        if (atomic_dec_and_test(&j->ready)) {
                wake_up(&j->ready_wait);
                j->start = sched_clock();
        } else {
                wait_event(j->ready_wait, !atomic_read(&j->ready));
        }

        ret = j->fn(j->c, j->nr / j->nr_threads);
        if (ret)
                j->ret = ret;

        if (atomic_dec_and_test(&j->done)) {
                j->finish = sched_clock();
                complete(&j->done_completion);
        }

        return 0;
}

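/*
 * Look up a test by name, run it on nr_threads threads, and log total time,
 * time per iteration and iterations per second.
 */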
int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
                         u64 nr, unsigned nr_threads)
{
        struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
        char name_buf[20], nr_buf[20], per_sec_buf[20];
        unsigned i;
        u64 time;

        atomic_set(&j.ready, nr_threads);
        init_waitqueue_head(&j.ready_wait);

        atomic_set(&j.done, nr_threads);
        init_completion(&j.done_completion);

#define perf_test(_test)                                \
        if (!strcmp(testname, #_test)) j.fn = _test

        perf_test(rand_insert);
        perf_test(rand_insert_multi);
        perf_test(rand_lookup);
        perf_test(rand_mixed);
        perf_test(rand_delete);

        perf_test(seq_insert);
        perf_test(seq_lookup);
        perf_test(seq_overwrite);
        perf_test(seq_delete);

        /* unit tests, not perf tests: */
        perf_test(test_delete);
        perf_test(test_delete_written);
        perf_test(test_iterate);
        perf_test(test_iterate_extents);
        perf_test(test_iterate_slots);
        perf_test(test_iterate_slots_extents);
        perf_test(test_peek_end);
        perf_test(test_peek_end_extents);

        perf_test(test_extent_overwrite_front);
        perf_test(test_extent_overwrite_back);
        perf_test(test_extent_overwrite_middle);
        perf_test(test_extent_overwrite_all);

        if (!j.fn) {
                pr_err("unknown test %s", testname);
                return -EINVAL;
        }

        //pr_info("running test %s:", testname);

        if (nr_threads == 1)
                btree_perf_test_thread(&j);
        else
                for (i = 0; i < nr_threads; i++)
                        kthread_run(btree_perf_test_thread, &j,
                                    "bcachefs perf test[%u]", i);

        while (wait_for_completion_interruptible(&j.done_completion))
                ;

        time = j.finish - j.start;

        scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
        bch2_hprint(&PBUF(nr_buf), nr);
        bch2_hprint(&PBUF(per_sec_buf), nr * NSEC_PER_SEC / time);
        printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
                name_buf, nr_buf, nr_threads,
                time / NSEC_PER_SEC,
                time * nr_threads / nr,
                per_sec_buf);
        return j.ret;
}

#endif /* CONFIG_BCACHEFS_TESTS */