// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

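/*
 * In-kernel unit and performance tests for the btree/transaction code.
 * bch2_btree_perf_test() looks a test up by name and runs it across the
 * requested number of threads; most tests insert "cookie" keys into the
 * xattrs or extents btree and then iterate over, overwrite or delete them.
 */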
static void delete_test_keys(struct bch_fs *c)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_extents,
                                      SPOS(0, 0, U32_MAX),
                                      SPOS(0, U64_MAX, U32_MAX),
                                      NULL);
        BUG_ON(ret);

        ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
                                      SPOS(0, 0, U32_MAX),
                                      SPOS(0, U64_MAX, U32_MAX),
                                      NULL);
        BUG_ON(ret);
}

/* unit tests */

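/*
 * Insert a single cookie key, then delete at the same iterator position
 * twice: the second delete exercises deleting a position that no longer
 * holds a live key.
 */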
static int test_delete(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k.p.snapshot = U32_MAX;

        bch2_trans_init(&trans, c, 0, 0);
        bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
                             BTREE_ITER_INTENT);

        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(&trans, &iter, &k.k_i, 0));
        if (ret) {
                bch_err(c, "update error in test_delete: %i", ret);
                goto err;
        }

        pr_info("deleting once");
        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
                bch_err(c, "delete error (first) in test_delete: %i", ret);
                goto err;
        }

        pr_info("deleting twice");
        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
                bch_err(c, "delete error (second) in test_delete: %i", ret);
                goto err;
        }
err:
        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return ret;
}

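/*
 * Like test_delete(), but the transaction is unlocked and all journal pins
 * are flushed between the insert and the delete, so the btree node holding
 * the key should have been written out before the delete runs.
 */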
static int test_delete_written(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k.p.snapshot = U32_MAX;

        bch2_trans_init(&trans, c, 0, 0);

        bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
                             BTREE_ITER_INTENT);

        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(&trans, &iter, &k.k_i, 0));
        if (ret) {
                bch_err(c, "update error in test_delete_written: %i", ret);
                goto err;
        }

        bch2_trans_unlock(&trans);
        bch2_journal_flush_all_pins(&c->journal);

        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
                bch_err(c, "delete error in test_delete_written: %i", ret);
                goto err;
        }
err:
        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return ret;
}

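/*
 * Insert nr cookie keys at consecutive offsets in the xattrs btree, then
 * walk them forwards with for_each_btree_key() and backwards with
 * bch2_btree_iter_prev(), checking that every position is seen exactly
 * once and in order.
 */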
static int test_iterate(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter = { NULL };
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i;
                k.k.p.snapshot = U32_MAX;

                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
                           POS_MIN, 0, k, ret) {
                if (k.k->p.inode)
                        break;

                BUG_ON(k.k->p.offset != i++);
        }

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
                BUG_ON(k.k->p.offset != --i);

        BUG_ON(i);
err:
        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return ret;
}

static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter = { NULL };
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test extents");

        for (i = 0; i < nr; i += 8) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 8;
                k.k.p.snapshot = U32_MAX;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate_extents: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_extents,
                           POS_MIN, 0, k, ret) {
                BUG_ON(bkey_start_offset(k.k) != i);
                i = k.k->p.offset;
        }

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
                BUG_ON(k.k->p.offset != i);
                i = bkey_start_offset(k.k);
        }

        BUG_ON(i);
err:
        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return ret;
}

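/*
 * Insert keys at every other offset, then iterate twice: once normally
 * (only the live keys should be returned) and once with BTREE_ITER_SLOTS,
 * which should also return the empty slots in between as deleted keys.
 */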
static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter = { NULL };
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i * 2;
                k.k.p.snapshot = U32_MAX;

                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate_slots: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
                           0, k, ret) {
                if (k.k->p.inode)
                        break;

                BUG_ON(k.k->p.offset != i);
                i += 2;
        }
        bch2_trans_iter_exit(&trans, &iter);

        BUG_ON(i != nr * 2);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
                           BTREE_ITER_SLOTS, k, ret) {
                BUG_ON(k.k->p.offset != i);
                BUG_ON(bkey_deleted(k.k) != (i & 1));

                i++;
                if (i == nr * 2)
                        break;
        }
        bch2_trans_iter_exit(&trans, &iter);
err:
        bch2_trans_exit(&trans);
        return ret;
}

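/*
 * Extents version of test_iterate_slots(): an 8-sector extent is inserted
 * every 16 sectors, and iterating with BTREE_ITER_SLOTS should return the
 * holes between them as deleted keys covering the gaps.
 */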
static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter = { NULL };
        struct bkey_s_c k;
        u64 i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i += 16) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 16;
                k.k.p.snapshot = U32_MAX;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
                        bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
                        goto err;
                }
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
                           0, k, ret) {
                BUG_ON(bkey_start_offset(k.k) != i + 8);
                BUG_ON(k.k->size != 8);
                i += 16;
        }
        bch2_trans_iter_exit(&trans, &iter);

        BUG_ON(i != nr);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
                           BTREE_ITER_SLOTS, k, ret) {
                BUG_ON(bkey_deleted(k.k) != !(i % 16));

                BUG_ON(bkey_start_offset(k.k) != i);
                BUG_ON(k.k->size != 8);
                i = k.k->p.offset;

                if (i == nr)
                        break;
        }
        bch2_trans_iter_exit(&trans, &iter);
err:
        bch2_trans_exit(&trans);
        return ret;
}

/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
static int test_peek_end(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_trans_init(&trans, c, 0, 0);
        bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return 0;
}

static int test_peek_end_extents(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_trans_init(&trans, c, 0, 0);
        bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN, 0);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        k = bch2_btree_iter_peek(&iter);
        BUG_ON(k.k);

        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return 0;
}

/* extent unit tests */

u64 test_version;

static int insert_test_extent(struct bch_fs *c,
                              u64 start, u64 end)
{
        struct bkey_i_cookie k;
        int ret;

        //pr_info("inserting %llu-%llu v %llu", start, end, test_version);

        bkey_cookie_init(&k.k_i);
        k.k_i.k.p.offset = end;
        k.k_i.k.p.snapshot = U32_MAX;
        k.k_i.k.size = end - start;
        k.k_i.k.version.lo = test_version++;

        ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                NULL, NULL, 0);
        if (ret)
                bch_err(c, "insert error in insert_test_extent: %i", ret);
        return ret;
}

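/*
 * Insert extent e1 and then an overlapping extent e2, exercising the
 * extent overwrite/split paths (front, back, middle, full overwrite)
 * depending on how the two ranges overlap.
 */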
static int __test_extent_overwrite(struct bch_fs *c,
                                    u64 e1_start, u64 e1_end,
                                    u64 e2_start, u64 e2_end)
{
        int ret;

        ret   = insert_test_extent(c, e1_start, e1_end) ?:
                insert_test_extent(c, e2_start, e2_end);

        delete_test_keys(c);
        return ret;
}

static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
                __test_extent_overwrite(c, 8, 64, 0, 32);
}

static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
                __test_extent_overwrite(c, 0, 64, 32, 72);
}

static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
        return __test_extent_overwrite(c, 0, 64, 32, 40);
}

static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
                __test_extent_overwrite(c, 32, 64,  0, 128) ?:
                __test_extent_overwrite(c, 32, 64, 32,  64) ?:
                __test_extent_overwrite(c, 32, 64, 32, 128);
}

/* perf tests */

static u64 test_rand(void)
{
        u64 v;
#if 0
        v = prandom_u32();
#else
        prandom_bytes(&v, sizeof(v));
#endif
        return v;
}

static int rand_insert(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct bkey_i_cookie k;
        int ret = 0;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);

        for (i = 0; i < nr; i++) {
                bkey_cookie_init(&k.k_i);
                k.k.p.offset = test_rand();
                k.k.p.snapshot = U32_MAX;

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
                if (ret) {
                        bch_err(c, "error in rand_insert: %i", ret);
                        break;
                }
        }

        bch2_trans_exit(&trans);
        return ret;
}

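/*
 * Like rand_insert(), but batches eight random inserts into a single
 * transaction commit so that commit overhead is amortized across several
 * updates.
 */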
static int rand_insert_multi(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct bkey_i_cookie k[8];
        int ret = 0;
        unsigned j;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);

        for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
                for (j = 0; j < ARRAY_SIZE(k); j++) {
                        bkey_cookie_init(&k[j].k_i);
                        k[j].k.p.offset = test_rand();
                        k[j].k.p.snapshot = U32_MAX;
                }

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
                if (ret) {
                        bch_err(c, "error in rand_insert_multi: %i", ret);
                        break;
                }
        }

        bch2_trans_exit(&trans);
        return ret;
}

static int rand_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);
        bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
                             SPOS(0, 0, U32_MAX), 0);

        for (i = 0; i < nr; i++) {
                bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));

                k = bch2_btree_iter_peek(&iter);
                ret = bkey_err(k);
                if (ret) {
                        bch_err(c, "error in rand_lookup: %i", ret);
                        break;
                }
        }

        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return ret;
}

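/*
 * Mixed random workload: every iteration does a lookup at a random offset,
 * and every fourth iteration also overwrites the key found there (if any)
 * within the same transaction.
 */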
static int rand_mixed_trans(struct btree_trans *trans,
                            struct btree_iter *iter,
                            struct bkey_i_cookie *cookie,
                            u64 i, u64 pos)
{
        struct bkey_s_c k;
        int ret;

        bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));

        k = bch2_btree_iter_peek(iter);
        ret = bkey_err(k);
        if (ret && ret != -EINTR)
                bch_err(trans->c, "lookup error in rand_mixed: %i", ret);
        if (ret)
                return ret;

        if (!(i & 3) && k.k) {
                bkey_cookie_init(&cookie->k_i);
                cookie->k.p = iter->pos;
                ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
        }

        return ret;
}

static int rand_mixed(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_i_cookie cookie;
        int ret = 0;
        u64 i, rand;

        bch2_trans_init(&trans, c, 0, 0);
        bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
                             SPOS(0, 0, U32_MAX), 0);

        for (i = 0; i < nr; i++) {
                rand = test_rand();
                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        rand_mixed_trans(&trans, &iter, &cookie, i, rand));
                if (ret) {
                        bch_err(c, "update error in rand_mixed: %i", ret);
                        break;
                }
        }

        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        return ret;
}

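/*
 * Delete helper for rand_delete(): peek at the given position and, if a
 * key is found at or after it, delete it within the caller's transaction.
 */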
static int __do_delete(struct btree_trans *trans, struct bpos pos)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
                             BTREE_ITER_INTENT);
        k = bch2_btree_iter_peek(&iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (!k.k)
                goto err;

        ret = bch2_btree_delete_at(trans, &iter, 0);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static int rand_delete(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        int ret = 0;
        u64 i;

        bch2_trans_init(&trans, c, 0, 0);

        for (i = 0; i < nr; i++) {
                struct bpos pos = SPOS(0, test_rand(), U32_MAX);

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __do_delete(&trans, pos));
                if (ret) {
                        bch_err(c, "error in rand_delete: %i", ret);
                        break;
                }
        }

        bch2_trans_exit(&trans);
        return ret;
}

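/*
 * Sequential workloads: seq_insert() fills the first nr slots with cookie
 * keys, seq_lookup() walks all keys, seq_overwrite() rewrites each key in
 * place, and seq_delete() removes everything with a single ranged delete.
 */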
static int seq_insert(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
        int ret = 0;
        u64 i = 0;

        bkey_cookie_init(&insert.k_i);

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX),
                           BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
                insert.k.p = iter.pos;

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        bch2_btree_iter_traverse(&iter) ?:
                        bch2_trans_update(&trans, &iter, &insert.k_i, 0));
                if (ret) {
                        bch_err(c, "error in seq_insert: %i", ret);
                        break;
                }

                if (++i == nr)
                        break;
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);
        return ret;
}

static int seq_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
                           SPOS(0, 0, U32_MAX), 0, k, ret)
                ;
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);
        return ret;
}

static int seq_overwrite(struct bch_fs *c, u64 nr)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
                           SPOS(0, 0, U32_MAX),
                           BTREE_ITER_INTENT, k, ret) {
                struct bkey_i_cookie u;

                bkey_reassemble(&u.k_i, k);

                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        bch2_btree_iter_traverse(&iter) ?:
                        bch2_trans_update(&trans, &iter, &u.k_i, 0));
                if (ret) {
                        bch_err(c, "error in seq_overwrite: %i", ret);
                        break;
                }
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);
        return ret;
}

static int seq_delete(struct bch_fs *c, u64 nr)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
                                      SPOS(0, 0, U32_MAX), POS_MAX, NULL);
        if (ret)
                bch_err(c, "error in seq_delete: %i", ret);
        return ret;
}

typedef int (*perf_test_fn)(struct bch_fs *, u64);

struct test_job {
        struct bch_fs                   *c;
        u64                             nr;
        unsigned                        nr_threads;
        perf_test_fn                    fn;

        atomic_t                        ready;
        wait_queue_head_t               ready_wait;

        atomic_t                        done;
        struct completion               done_completion;

        u64                             start;
        u64                             finish;
        int                             ret;
};

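/*
 * Each worker runs the test over nr/nr_threads iterations. The last thread
 * to become ready records the start time and wakes the others; the last
 * thread to finish records the end time and signals completion.
 */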
static int btree_perf_test_thread(void *data)
{
        struct test_job *j = data;
        int ret;

        if (atomic_dec_and_test(&j->ready)) {
                wake_up(&j->ready_wait);
                j->start = sched_clock();
        } else {
                wait_event(j->ready_wait, !atomic_read(&j->ready));
        }

        ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
        if (ret)
                j->ret = ret;

        if (atomic_dec_and_test(&j->done)) {
                j->finish = sched_clock();
                complete(&j->done_completion);
        }

        return 0;
}

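/*
 * Entry point: look the test up by name, run it on nr_threads kthreads,
 * then report total time, per-iteration time and throughput.
 */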
int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
                         u64 nr, unsigned nr_threads)
{
        struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
        char name_buf[20], nr_buf[20], per_sec_buf[20];
        unsigned i;
        u64 time;

        atomic_set(&j.ready, nr_threads);
        init_waitqueue_head(&j.ready_wait);

        atomic_set(&j.done, nr_threads);
        init_completion(&j.done_completion);

#define perf_test(_test)                                \
        if (!strcmp(testname, #_test)) j.fn = _test

        perf_test(rand_insert);
        perf_test(rand_insert_multi);
        perf_test(rand_lookup);
        perf_test(rand_mixed);
        perf_test(rand_delete);

        perf_test(seq_insert);
        perf_test(seq_lookup);
        perf_test(seq_overwrite);
        perf_test(seq_delete);

        /* unit tests, not perf tests: */
        perf_test(test_delete);
        perf_test(test_delete_written);
        perf_test(test_iterate);
        perf_test(test_iterate_extents);
        perf_test(test_iterate_slots);
        perf_test(test_iterate_slots_extents);
        perf_test(test_peek_end);
        perf_test(test_peek_end_extents);

        perf_test(test_extent_overwrite_front);
        perf_test(test_extent_overwrite_back);
        perf_test(test_extent_overwrite_middle);
        perf_test(test_extent_overwrite_all);

        if (!j.fn) {
                pr_err("unknown test %s", testname);
                return -EINVAL;
        }

        //pr_info("running test %s:", testname);

        if (nr_threads == 1)
                btree_perf_test_thread(&j);
        else
                for (i = 0; i < nr_threads; i++)
                        kthread_run(btree_perf_test_thread, &j,
                                    "bcachefs perf test[%u]", i);

        while (wait_for_completion_interruptible(&j.done_completion))
                ;

        time = j.finish - j.start;

        scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
        bch2_hprint(&PBUF(nr_buf), nr);
        bch2_hprint(&PBUF(per_sec_buf), div64_u64(nr * NSEC_PER_SEC, time));
        printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
                name_buf, nr_buf, nr_threads,
                div_u64(time, NSEC_PER_SEC),
                div_u64(time * nr_threads, nr),
                per_sec_buf);
        return j.ret;
}

#endif /* CONFIG_BCACHEFS_TESTS */