1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
10 #include "linux/kthread.h"
11 #include "linux/random.h"
/*
 * Wipe all keys from the extents and xattrs btrees so each test starts
 * from an empty keyspace.
 * NOTE(review): sampled view — range bounds and error handling between
 * the visible statements are elided here.
 */
13 static void delete_test_keys(struct bch_fs *c)
/* clear the extents btree used by the extent tests */
17 ret = bch2_btree_delete_range(c, BTREE_ID_extents,
/* clear the xattrs btree used by most key tests */
23 ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
/*
 * Unit test: insert a single cookie key, then delete at the same
 * position twice — the second delete exercises deleting an
 * already-empty slot and must also succeed.
 */
32 static int test_delete(struct bch_fs *c, u64 nr)
34 struct btree_trans trans;
35 struct btree_iter iter;
36 struct bkey_i_cookie k;
39 bkey_cookie_init(&k.k_i);
/* all test keys live in the U32_MAX snapshot */
40 k.k.p.snapshot = U32_MAX;
42 bch2_trans_init(&trans, c, 0, 0);
43 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
/* insert: traverse then update, committed as one transaction */
46 ret = commit_do(&trans, NULL, NULL, 0,
47 bch2_btree_iter_traverse(&iter) ?:
48 bch2_trans_update(&trans, &iter, &k.k_i, 0));
50 bch_err(c, "%s(): update error in: %s", __func__, bch2_err_str(ret));
/* first delete: removes the key just inserted */
54 pr_info("deleting once");
55 ret = commit_do(&trans, NULL, NULL, 0,
56 bch2_btree_iter_traverse(&iter) ?:
57 bch2_btree_delete_at(&trans, &iter, 0));
59 bch_err(c, "%s(): delete error (first): %s", __func__, bch2_err_str(ret));
/* second delete at the same pos: must not error */
63 pr_info("deleting twice");
64 ret = commit_do(&trans, NULL, NULL, 0,
65 bch2_btree_iter_traverse(&iter) ?:
66 bch2_btree_delete_at(&trans, &iter, 0));
68 bch_err(c, "%s(): delete error (second): %s", __func__, bch2_err_str(ret));
72 bch2_trans_iter_exit(&trans, &iter);
73 bch2_trans_exit(&trans);
/*
 * Like test_delete(), but flushes the journal between insert and
 * delete so the delete runs against a key that has been written out,
 * not one still sitting in the journal/dirty btree node.
 */
77 static int test_delete_written(struct bch_fs *c, u64 nr)
79 struct btree_trans trans;
80 struct btree_iter iter;
81 struct bkey_i_cookie k;
84 bkey_cookie_init(&k.k_i);
85 k.k.p.snapshot = U32_MAX;
87 bch2_trans_init(&trans, c, 0, 0);
89 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
92 ret = commit_do(&trans, NULL, NULL, 0,
93 bch2_btree_iter_traverse(&iter) ?:
94 bch2_trans_update(&trans, &iter, &k.k_i, 0));
96 bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
/*
 * Unlock before flushing: journal reclaim may need btree locks we
 * hold, so keeping them across the flush could deadlock.
 */
100 bch2_trans_unlock(&trans);
101 bch2_journal_flush_all_pins(&c->journal);
/* now delete the (written) key */
103 ret = commit_do(&trans, NULL, NULL, 0,
104 bch2_btree_iter_traverse(&iter) ?:
105 bch2_btree_delete_at(&trans, &iter, 0));
107 bch_err(c, "%s(): delete error: %s", __func__, bch2_err_str(ret));
111 bch2_trans_iter_exit(&trans, &iter);
112 bch2_trans_exit(&trans);
/*
 * Insert @nr cookie keys at consecutive offsets, then verify that
 * forward iteration visits offsets 0..nr-1 in order and reverse
 * iteration visits them in the opposite order.
 */
116 static int test_iterate(struct bch_fs *c, u64 nr)
118 struct btree_trans trans;
119 struct btree_iter iter = { NULL };
124 bch2_trans_init(&trans, c, 0, 0);
128 pr_info("inserting test keys");
130 for (i = 0; i < nr; i++) {
131 struct bkey_i_cookie k;
133 bkey_cookie_init(&k.k_i);
135 k.k.p.snapshot = U32_MAX;
137 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
140 bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
/* forward pass: offsets must come back as 0, 1, 2, ... */
145 pr_info("iterating forwards");
149 ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
150 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
152 BUG_ON(k.k->p.offset != i++);
156 bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
/* reverse pass: same keys, descending order */
162 pr_info("iterating backwards");
164 ret = for_each_btree_key_reverse(&trans, iter, BTREE_ID_xattrs,
165 SPOS(0, U64_MAX, U32_MAX), 0, k,
167 BUG_ON(k.k->p.offset != --i);
171 bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
177 bch2_trans_iter_exit(&trans, &iter);
178 bch2_trans_exit(&trans);
/*
 * Insert @nr/8 adjacent 8-sector extents, then verify forward and
 * reverse iteration return them contiguously with no gaps or overlaps.
 */
182 static int test_iterate_extents(struct bch_fs *c, u64 nr)
184 struct btree_trans trans;
185 struct btree_iter iter = { NULL };
190 bch2_trans_init(&trans, c, 0, 0);
194 pr_info("inserting test extents");
196 for (i = 0; i < nr; i += 8) {
197 struct bkey_i_cookie k;
199 bkey_cookie_init(&k.k_i);
/* extent [i, i+8): bkey pos is the *end* of the extent */
200 k.k.p.offset = i + 8;
201 k.k.p.snapshot = U32_MAX;
204 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
207 bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
/* forward: each extent must start exactly where the last ended */
212 pr_info("iterating forwards");
216 ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
217 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
219 BUG_ON(bkey_start_offset(k.k) != i);
224 bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
/* reverse: each extent must end where the previous one started */
230 pr_info("iterating backwards");
232 ret = for_each_btree_key_reverse(&trans, iter, BTREE_ID_extents,
233 SPOS(0, U64_MAX, U32_MAX), 0, k,
235 BUG_ON(k.k->p.offset != i);
236 i = bkey_start_offset(k.k);
240 bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
246 bch2_trans_iter_exit(&trans, &iter);
247 bch2_trans_exit(&trans);
/*
 * Insert keys at even offsets only, then check that normal iteration
 * skips the holes while BTREE_ITER_SLOTS iteration returns a
 * whiteout/deleted key for every empty odd slot.
 */
251 static int test_iterate_slots(struct bch_fs *c, u64 nr)
253 struct btree_trans trans;
254 struct btree_iter iter = { NULL };
259 bch2_trans_init(&trans, c, 0, 0);
263 pr_info("inserting test keys");
265 for (i = 0; i < nr; i++) {
266 struct bkey_i_cookie k;
268 bkey_cookie_init(&k.k_i);
/* keys at offsets 0, 2, 4, ... — odd offsets stay empty */
269 k.k.p.offset = i * 2;
270 k.k.p.snapshot = U32_MAX;
272 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
275 bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
/* plain iteration: only the inserted (even) offsets appear */
280 pr_info("iterating forwards");
284 ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
285 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
287 BUG_ON(k.k->p.offset != i);
292 bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
/* slots iteration: every offset appears; odd ones are deleted keys */
298 pr_info("iterating forwards by slots");
302 ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
303 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
304 BTREE_ITER_SLOTS, k, ({
308 BUG_ON(k.k->p.offset != i);
309 BUG_ON(bkey_deleted(k.k) != (i & 1));
315 bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
320 bch2_trans_exit(&trans);
/*
 * Extent version of test_iterate_slots(): insert an 8-sector extent in
 * every 16-sector span (leaving an 8-sector hole between extents), then
 * verify plain iteration returns only the extents and SLOTS iteration
 * also synthesizes deleted keys covering the holes.
 */
324 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
326 struct btree_trans trans;
327 struct btree_iter iter = { NULL };
332 bch2_trans_init(&trans, c, 0, 0);
336 pr_info("inserting test keys");
338 for (i = 0; i < nr; i += 16) {
339 struct bkey_i_cookie k;
341 bkey_cookie_init(&k.k_i);
/* extent occupying [i+8, i+16); [i, i+8) is left as a hole */
342 k.k.p.offset = i + 16;
343 k.k.p.snapshot = U32_MAX;
346 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
349 bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
354 pr_info("iterating forwards");
358 ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
359 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
361 BUG_ON(bkey_start_offset(k.k) != i + 8);
362 BUG_ON(k.k->size != 8);
367 bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
/* slots pass alternates: hole (deleted) extent, real extent, ... */
373 pr_info("iterating forwards by slots");
377 ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
378 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
379 BTREE_ITER_SLOTS, k, ({
382 BUG_ON(bkey_deleted(k.k) != !(i % 16));
384 BUG_ON(bkey_start_offset(k.k) != i);
385 BUG_ON(k.k->size != 8);
390 bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
395 bch2_trans_exit(&trans);
400 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * Peek past the end of an empty keyspace: both peeks must return no
 * key (expected checks on k.k are in elided lines). Peeking twice
 * guards against iterator state corruption after the first miss.
 */
403 static int test_peek_end(struct bch_fs *c, u64 nr)
405 struct btree_trans trans;
406 struct btree_iter iter;
409 bch2_trans_init(&trans, c, 0, 0);
410 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
411 SPOS(0, 0, U32_MAX), 0);
/* lockrestart_do retries on transaction restart */
413 lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
416 lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
419 bch2_trans_iter_exit(&trans, &iter);
420 bch2_trans_exit(&trans);
/* Same as test_peek_end(), but on the extents btree. */
424 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
426 struct btree_trans trans;
427 struct btree_iter iter;
430 bch2_trans_init(&trans, c, 0, 0);
431 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
432 SPOS(0, 0, U32_MAX), 0);
434 lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
437 lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
440 bch2_trans_iter_exit(&trans, &iter);
441 bch2_trans_exit(&trans);
445 /* extent unit tests */
/*
 * Insert a cookie extent covering [start, end) into the extents btree.
 * Each insert gets a monotonically increasing version (test_version,
 * declared in an elided line) so later overwrites win.
 */
449 static int insert_test_extent(struct bch_fs *c,
452 struct bkey_i_cookie k;
455 bkey_cookie_init(&k.k_i);
/* extent bkey pos = end; size = length */
456 k.k_i.k.p.offset = end;
457 k.k_i.k.p.snapshot = U32_MAX;
458 k.k_i.k.size = end - start;
459 k.k_i.k.version.lo = test_version++;
461 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
464 bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
/*
 * Helper: insert extent e1 then overlapping extent e2, exercising the
 * extent-overwrite paths (front/back/middle/full, per the callers).
 * Verification of the resulting keys is in elided lines.
 */
468 static int __test_extent_overwrite(struct bch_fs *c,
469 u64 e1_start, u64 e1_end,
470 u64 e2_start, u64 e2_end)
474 ret = insert_test_extent(c, e1_start, e1_end) ?:
475 insert_test_extent(c, e2_start, e2_end);
/* Overwrite the front of an existing extent (with/without offset). */
481 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
483 return __test_extent_overwrite(c, 0, 64, 0, 32) ?:
484 __test_extent_overwrite(c, 8, 64, 0, 32);
/* Overwrite the back of an existing extent, including past its end. */
487 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
489 return __test_extent_overwrite(c, 0, 64, 32, 64) ?:
490 __test_extent_overwrite(c, 0, 64, 32, 72);
/* Overwrite the middle of an extent, splitting it into two pieces. */
493 static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
495 return __test_extent_overwrite(c, 0, 64, 32, 40);
/* Completely overwrite an extent, with overhang on either/both sides. */
498 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
500 return __test_extent_overwrite(c, 32, 64, 0, 64) ?:
501 __test_extent_overwrite(c, 32, 64, 0, 128) ?:
502 __test_extent_overwrite(c, 32, 64, 32, 64) ?:
503 __test_extent_overwrite(c, 32, 64, 32, 128);
506 /* snapshot unit tests */
508 /* Test skipping over keys in unrelated snapshots: */
/*
 * Insert a key in snapshot @snapid_hi, then iterate from snapshot
 * @snapid_lo: the key in the unrelated sibling snapshot must be
 * filtered out, so the peek lands on the U32_MAX-snapshot key inserted
 * by the caller (test_snapshots).
 */
509 static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
511 struct btree_trans trans;
512 struct btree_iter iter;
514 struct bkey_i_cookie cookie;
517 bkey_cookie_init(&cookie.k_i);
518 cookie.k.p.snapshot = snapid_hi;
519 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
524 bch2_trans_init(&trans, c, 0, 0);
525 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
526 SPOS(0, 0, snapid_lo), 0);
527 lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
/* must see the ancestor (U32_MAX) key, not the sibling's */
529 BUG_ON(k.k->p.snapshot != U32_MAX);
531 bch2_trans_iter_exit(&trans, &iter);
532 bch2_trans_exit(&trans);
/*
 * Snapshot visibility test: insert a key in snapshot U32_MAX, create
 * two child snapshots of it, then check that a key written in one
 * child is invisible from the other via test_snapshot_filter().
 */
536 static int test_snapshots(struct bch_fs *c, u64 nr)
538 struct bkey_i_cookie cookie;
540 u32 snapid_subvols[2] = { 1, 1 };
543 bkey_cookie_init(&cookie.k_i);
544 cookie.k.p.snapshot = U32_MAX;
545 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
/* create two sibling snapshot nodes under U32_MAX */
550 ret = bch2_trans_do(c, NULL, NULL, 0,
551 bch2_snapshot_node_create(&trans, U32_MAX,
/* order so snapids[0] is the lower id */
558 if (snapids[0] > snapids[1])
559 swap(snapids[0], snapids[1]);
561 ret = test_snapshot_filter(c, snapids[0], snapids[1]);
563 bch_err(c, "%s(): err from test_snapshot_filter: %s", __func__, bch2_err_str(ret));
/*
 * Random u64 for the rand_* perf tests (scaling to the key range is
 * in an elided line).
 */
572 static u64 test_rand(void)
576 get_random_bytes(&v, sizeof(v));
/*
 * Perf test: @nr single-key transactional inserts at random offsets.
 */
580 static int rand_insert(struct bch_fs *c, u64 nr)
582 struct btree_trans trans;
583 struct bkey_i_cookie k;
587 bch2_trans_init(&trans, c, 0, 0);
589 for (i = 0; i < nr; i++) {
590 bkey_cookie_init(&k.k_i);
591 k.k.p.offset = test_rand();
592 k.k.p.snapshot = U32_MAX;
/* one commit per key — measures per-transaction overhead */
594 ret = commit_do(&trans, NULL, NULL, 0,
595 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
597 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
602 bch2_trans_exit(&trans);
/*
 * Perf test: like rand_insert(), but batches 8 random-key inserts into
 * a single transaction commit — measures multi-update commit cost.
 */
606 static int rand_insert_multi(struct bch_fs *c, u64 nr)
608 struct btree_trans trans;
609 struct bkey_i_cookie k[8];
614 bch2_trans_init(&trans, c, 0, 0);
616 for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
617 for (j = 0; j < ARRAY_SIZE(k); j++) {
618 bkey_cookie_init(&k[j].k_i);
619 k[j].k.p.offset = test_rand();
620 k[j].k.p.snapshot = U32_MAX;
/* all 8 updates chained with ?: commit atomically */
623 ret = commit_do(&trans, NULL, NULL, 0,
624 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
625 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
626 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
627 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
628 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
629 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
630 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
631 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
633 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
638 bch2_trans_exit(&trans);
/*
 * Perf test: @nr point lookups at random offsets, reusing one iterator
 * by repositioning it each time.
 */
642 static int rand_lookup(struct bch_fs *c, u64 nr)
644 struct btree_trans trans;
645 struct btree_iter iter;
650 bch2_trans_init(&trans, c, 0, 0);
651 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
652 SPOS(0, 0, U32_MAX), 0);
654 for (i = 0; i < nr; i++) {
655 bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
657 lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
660 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
665 bch2_trans_iter_exit(&trans, &iter);
666 bch2_trans_exit(&trans);
/*
 * One iteration of the mixed workload: peek at a random position, and
 * on every 4th iteration (i & 3 == 0) also overwrite the key found.
 * Runs inside commit_do(), so transaction-restart errors propagate up
 * for retry rather than being logged.
 */
670 static int rand_mixed_trans(struct btree_trans *trans,
671 struct btree_iter *iter,
672 struct bkey_i_cookie *cookie,
678 bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
680 k = bch2_btree_iter_peek(iter);
/* only log real errors, not restarts (those are retried) */
682 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
683 bch_err(trans->c, "%s(): lookup error: %s", __func__, bch2_err_str(ret));
/* 25% of iterations that found a key also update it */
687 if (!(i & 3) && k.k) {
688 bkey_cookie_init(&cookie->k_i);
689 cookie->k.p = iter->pos;
690 ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
/*
 * Perf test: @nr iterations of the mixed lookup/update workload in
 * rand_mixed_trans(), one transaction commit per iteration.
 */
696 static int rand_mixed(struct bch_fs *c, u64 nr)
698 struct btree_trans trans;
699 struct btree_iter iter;
700 struct bkey_i_cookie cookie;
704 bch2_trans_init(&trans, c, 0, 0);
705 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
706 SPOS(0, 0, U32_MAX), 0);
708 for (i = 0; i < nr; i++) {
/* rand is picked outside commit_do so retries reuse the same pos */
710 ret = commit_do(&trans, NULL, NULL, 0,
711 rand_mixed_trans(&trans, &iter, &cookie, i, rand));
713 bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
718 bch2_trans_iter_exit(&trans, &iter);
719 bch2_trans_exit(&trans);
/*
 * Delete the first key at or after @pos, if any; intended to run
 * inside commit_do() (error/empty-key handling is in elided lines).
 */
723 static int __do_delete(struct btree_trans *trans, struct bpos pos)
725 struct btree_iter iter;
729 bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
731 lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
739 ret = bch2_btree_delete_at(trans, &iter, 0);
741 bch2_trans_iter_exit(trans, &iter);
/*
 * Perf test: @nr transactional deletes at random positions via
 * __do_delete().
 */
745 static int rand_delete(struct bch_fs *c, u64 nr)
747 struct btree_trans trans;
751 bch2_trans_init(&trans, c, 0, 0);
753 for (i = 0; i < nr; i++) {
754 struct bpos pos = SPOS(0, test_rand(), U32_MAX);
756 ret = commit_do(&trans, NULL, NULL, 0,
757 __do_delete(&trans, pos));
759 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
764 bch2_trans_exit(&trans);
/*
 * Perf test: sequential inserts — walk empty slots in order with a
 * SLOTS|INTENT iterator and insert a cookie at each position until
 * @nr keys have been written.
 */
768 static int seq_insert(struct bch_fs *c, u64 nr)
770 struct btree_trans trans;
771 struct btree_iter iter;
773 struct bkey_i_cookie insert;
776 bkey_cookie_init(&insert.k_i);
778 bch2_trans_init(&trans, c, 0, 0);
/* for_each_btree_key_commit: commits after each loop body */
780 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
782 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
/* stop once nr keys inserted (break is in an elided line) */
785 if (iter.pos.offset >= nr)
787 insert.k.p = iter.pos;
788 bch2_trans_update(&trans, &iter, &insert.k_i, 0);
791 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
793 bch2_trans_exit(&trans);
/*
 * Perf test: sequential scan over every key in the xattrs btree
 * (loop body does no work — measures raw iteration cost).
 */
797 static int seq_lookup(struct bch_fs *c, u64 nr)
799 struct btree_trans trans;
800 struct btree_iter iter;
804 bch2_trans_init(&trans, c, 0, 0);
806 ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
807 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
811 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
813 bch2_trans_exit(&trans);
/*
 * Perf test: sequential overwrite — rewrite every existing key in
 * place (reassemble then update), committing as it goes.
 */
817 static int seq_overwrite(struct bch_fs *c, u64 nr)
819 struct btree_trans trans;
820 struct btree_iter iter;
824 bch2_trans_init(&trans, c, 0, 0);
826 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
828 BTREE_ITER_INTENT, k,
831 struct bkey_i_cookie u;
/* copy the existing key, then write it back unchanged */
833 bkey_reassemble(&u.k_i, k);
834 bch2_trans_update(&trans, &iter, &u.k_i, 0);
837 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
839 bch2_trans_exit(&trans);
/* Perf test: delete the whole xattrs keyspace with one range delete. */
843 static int seq_delete(struct bch_fs *c, u64 nr)
847 ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
852 bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
/* Signature shared by all unit/perf tests above: (fs, nr_iterations). */
856 typedef int (*perf_test_fn)(struct bch_fs *, u64);
/*
 * Fields of struct test_job (struct header is in an elided line):
 * threads block on ready_wait until all have started, and the last
 * thread to finish signals done_completion.
 */
865 wait_queue_head_t ready_wait;
868 struct completion done_completion;
/*
 * Worker body run by each perf-test thread (and once by the caller).
 * Barrier on entry: the last thread to arrive records the start time
 * and wakes the rest; the last thread to finish records the end time
 * and completes done_completion for bch2_btree_perf_test().
 */
875 static int btree_perf_test_thread(void *data)
877 struct test_job *j = data;
/* entry barrier: last arrival starts the clock */
880 if (atomic_dec_and_test(&j->ready)) {
881 wake_up(&j->ready_wait);
882 j->start = sched_clock();
884 wait_event(j->ready_wait, !atomic_read(&j->ready));
/* each thread does nr/nr_threads iterations of the test fn */
887 ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
889 bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
/* exit barrier: last finisher stops the clock and signals done */
893 if (atomic_dec_and_test(&j->done)) {
894 j->finish = sched_clock();
895 complete(&j->done_completion);
/*
 * Entry point (called from sysfs): look up @testname in the table
 * below, run it with @nr iterations across @nr_threads threads, and
 * print iterations/sec and nsec/iter.
 */
901 int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
902 u64 nr, unsigned nr_threads)
904 struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
906 struct printbuf nr_buf = PRINTBUF;
907 struct printbuf per_sec_buf = PRINTBUF;
/* both barriers count down from nr_threads (see test_thread) */
911 atomic_set(&j.ready, nr_threads);
912 init_waitqueue_head(&j.ready_wait);
914 atomic_set(&j.done, nr_threads);
915 init_completion(&j.done_completion);
/* string -> function table via token pasting */
917 #define perf_test(_test) \
918 if (!strcmp(testname, #_test)) j.fn = _test
920 perf_test(rand_insert);
921 perf_test(rand_insert_multi);
922 perf_test(rand_lookup);
923 perf_test(rand_mixed);
924 perf_test(rand_delete);
926 perf_test(seq_insert);
927 perf_test(seq_lookup);
928 perf_test(seq_overwrite);
929 perf_test(seq_delete);
931 /* a unit test, not a perf test: */
932 perf_test(test_delete);
933 perf_test(test_delete_written);
934 perf_test(test_iterate);
935 perf_test(test_iterate_extents);
936 perf_test(test_iterate_slots);
937 perf_test(test_iterate_slots_extents);
938 perf_test(test_peek_end);
939 perf_test(test_peek_end_extents);
941 perf_test(test_extent_overwrite_front);
942 perf_test(test_extent_overwrite_back);
943 perf_test(test_extent_overwrite_middle);
944 perf_test(test_extent_overwrite_all);
946 perf_test(test_snapshots);
949 pr_err("unknown test %s", testname);
953 //pr_info("running test %s:", testname);
/* this thread participates as one of the nr_threads workers */
956 btree_perf_test_thread(&j);
958 for (i = 0; i < nr_threads; i++)
959 kthread_run(btree_perf_test_thread, &j,
960 "bcachefs perf test[%u]", i);
/* interruptible wait: loop so a signal doesn't abandon the workers */
962 while (wait_for_completion_interruptible(&j.done_completion))
965 time = j.finish - j.start;
967 scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
968 prt_human_readable_u64(&nr_buf, nr);
969 prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
970 printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
971 name_buf, nr_buf.buf, nr_threads,
972 div_u64(time, NSEC_PER_SEC),
973 div_u64(time * nr_threads, nr),
975 printbuf_exit(&per_sec_buf);
976 printbuf_exit(&nr_buf);
980 #endif /* CONFIG_BCACHEFS_TESTS */