1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
10 #include "linux/kthread.h"
11 #include "linux/random.h"
/*
 * Wipe all keys the tests may have left behind: delete the entire
 * SPOS(0, 0, U32_MAX)..SPOS_MAX range in both the extents and xattrs btrees.
 * NOTE(review): the error handling between the two deletes is elided from
 * this view; the returns of bch2_btree_delete_range() appear to be checked
 * via the (elided) use of @ret.
 */
13 static void delete_test_keys(struct bch_fs *c)
17 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
18 				      SPOS(0, 0, U32_MAX), SPOS_MAX,
23 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
24 				      SPOS(0, 0, U32_MAX), SPOS_MAX,
/*
 * Unit test: insert a single cookie key in the xattrs btree, then delete at
 * the same iterator position twice.  The second delete exercises deleting at
 * a position that no longer holds a key.
 * Each step runs inside __bch2_trans_do() so it is retried on transaction
 * restart; bch2_btree_iter_traverse() revalidates the iterator first.
 * NOTE(review): `int ret;`, the per-step `if (ret) goto err;` checks and the
 * final return are elided from this view.
 */
31 static int test_delete(struct bch_fs *c, u64 nr)
33 	struct btree_trans trans;
34 	struct btree_iter iter;
35 	struct bkey_i_cookie k;
38 	bkey_cookie_init(&k.k_i);
	/* key at pos 0 in the "all snapshots visible" snapshot */
39 	k.k.p.snapshot = U32_MAX;
41 	bch2_trans_init(&trans, c, 0, 0);
42 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
45 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
46 		bch2_btree_iter_traverse(&iter) ?:
47 		bch2_trans_update(&trans, &iter, &k.k_i, 0));
49 		bch_err(c, "update error in test_delete: %i", ret);
53 	pr_info("deleting once");
54 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
55 		bch2_btree_iter_traverse(&iter) ?:
56 		bch2_btree_delete_at(&trans, &iter, 0));
58 		bch_err(c, "delete error (first) in test_delete: %i", ret);
	/* deleting again at the same (now empty) position must also succeed */
62 	pr_info("deleting twice");
63 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
64 		bch2_btree_iter_traverse(&iter) ?:
65 		bch2_btree_delete_at(&trans, &iter, 0));
67 		bch_err(c, "delete error (second) in test_delete: %i", ret);
71 	bch2_trans_iter_exit(&trans, &iter);
72 	bch2_trans_exit(&trans);
/*
 * Like test_delete(), but flushes all journal pins after the insert so the
 * key has been written out to the btree before it is deleted — exercising
 * deletion of a key that is no longer just sitting in the journal.
 * NOTE(review): `int ret;`, error-check branches and the return are elided
 * from this view.
 */
76 static int test_delete_written(struct bch_fs *c, u64 nr)
78 	struct btree_trans trans;
79 	struct btree_iter iter;
80 	struct bkey_i_cookie k;
83 	bkey_cookie_init(&k.k_i);
84 	k.k.p.snapshot = U32_MAX;
86 	bch2_trans_init(&trans, c, 0, 0);
88 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
91 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
92 		bch2_btree_iter_traverse(&iter) ?:
93 		bch2_trans_update(&trans, &iter, &k.k_i, 0));
95 		bch_err(c, "update error in test_delete_written: %i", ret);
	/* drop btree locks before blocking on the journal flush */
99 	bch2_trans_unlock(&trans);
100 	bch2_journal_flush_all_pins(&c->journal);
102 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
103 		bch2_btree_iter_traverse(&iter) ?:
104 		bch2_btree_delete_at(&trans, &iter, 0));
106 		bch_err(c, "delete error in test_delete_written: %i", ret);
110 	bch2_trans_iter_exit(&trans, &iter);
111 	bch2_trans_exit(&trans);
/*
 * Unit test: insert @nr cookie keys at offsets 0..nr-1 in the xattrs btree,
 * then verify forward iteration visits them in ascending offset order and
 * backward iteration (bch2_btree_iter_prev) visits them in descending order.
 * NOTE(review): the loop body setting k.k.p.offset = i, the error-handling
 * branches and the return are elided from this view.
 */
115 static int test_iterate(struct bch_fs *c, u64 nr)
117 	struct btree_trans trans;
118 	struct btree_iter iter = { NULL };
123 	bch2_trans_init(&trans, c, 0, 0);
127 	pr_info("inserting test keys");
129 	for (i = 0; i < nr; i++) {
130 		struct bkey_i_cookie k;
132 		bkey_cookie_init(&k.k_i);
134 		k.k.p.snapshot = U32_MAX;
136 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
139 			bch_err(c, "insert error in test_iterate: %i", ret);
144 	pr_info("iterating forwards");
148 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
149 			   SPOS(0, 0, U32_MAX), 0, k, ret) {
		/* keys must come back in strictly ascending offset order */
153 		BUG_ON(k.k->p.offset != i++);
158 	pr_info("iterating backwards");
	/* walk back down from the end; i counts back to 0 */
160 	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
161 		BUG_ON(k.k->p.offset != --i);
165 	bch2_trans_iter_exit(&trans, &iter);
166 	bch2_trans_exit(&trans);
/*
 * Unit test: insert back-to-back 8-sector extents covering [0, nr), then
 * verify forward iteration sees them contiguous (each extent starts where
 * the previous one ended) and backward iteration sees the same coverage in
 * reverse.
 * NOTE(review): error handling and the return are elided from this view.
 */
170 static int test_iterate_extents(struct bch_fs *c, u64 nr)
172 	struct btree_trans trans;
173 	struct btree_iter iter = { NULL };
178 	bch2_trans_init(&trans, c, 0, 0);
182 	pr_info("inserting test extents");
184 	for (i = 0; i < nr; i += 8) {
185 		struct bkey_i_cookie k;
187 		bkey_cookie_init(&k.k_i);
		/* extent [i, i + 8): p.offset is the exclusive end position */
188 		k.k.p.offset = i + 8;
189 		k.k.p.snapshot = U32_MAX;
192 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
195 			bch_err(c, "insert error in test_iterate_extents: %i", ret);
200 	pr_info("iterating forwards");
204 	for_each_btree_key(&trans, iter, BTREE_ID_extents,
205 			   SPOS(0, 0, U32_MAX), 0, k, ret) {
		/* each extent must start exactly where the previous ended */
206 		BUG_ON(bkey_start_offset(k.k) != i);
212 	pr_info("iterating backwards");
214 	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
215 		BUG_ON(k.k->p.offset != i);
216 		i = bkey_start_offset(k.k);
221 	bch2_trans_iter_exit(&trans, &iter);
222 	bch2_trans_exit(&trans);
/*
 * Unit test: insert cookie keys at even offsets only (0, 2, 4, ...), then
 * verify that normal iteration skips the empty odd slots while
 * BTREE_ITER_SLOTS iteration visits every slot, returning deleted/whiteout
 * keys for the odd offsets.
 * NOTE(review): error handling, the `i += 2` / `i++` loop-body updates and
 * the return are elided from this view.
 */
226 static int test_iterate_slots(struct bch_fs *c, u64 nr)
228 	struct btree_trans trans;
229 	struct btree_iter iter = { NULL };
234 	bch2_trans_init(&trans, c, 0, 0);
238 	pr_info("inserting test keys");
240 	for (i = 0; i < nr; i++) {
241 		struct bkey_i_cookie k;
243 		bkey_cookie_init(&k.k_i);
		/* only even offsets are populated */
244 		k.k.p.offset = i * 2;
245 		k.k.p.snapshot = U32_MAX;
247 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
250 			bch_err(c, "insert error in test_iterate_slots: %i", ret);
255 	pr_info("iterating forwards");
259 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
260 			   SPOS(0, 0, U32_MAX), 0, k, ret) {
264 		BUG_ON(k.k->p.offset != i);
267 	bch2_trans_iter_exit(&trans, &iter);
271 	pr_info("iterating forwards by slots");
275 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
277 			   BTREE_ITER_SLOTS, k, ret) {
278 		BUG_ON(k.k->p.offset != i);
		/* odd slots hold no key: they must read back as deleted */
279 		BUG_ON(bkey_deleted(k.k) != (i & 1));
285 	bch2_trans_iter_exit(&trans, &iter);
287 	bch2_trans_exit(&trans);
/*
 * Unit test: insert 8-sector extents spaced every 16 sectors (covering
 * [i + 8, i + 16) for i = 0, 16, 32, ...), then verify normal iteration
 * returns only the real extents while BTREE_ITER_SLOTS iteration alternates
 * between 8-sector holes (deleted keys) and the 8-sector extents.
 * NOTE(review): error handling, the loop-index updates inside the iteration
 * bodies, and the return are elided from this view.
 */
291 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
293 	struct btree_trans trans;
294 	struct btree_iter iter = { NULL };
299 	bch2_trans_init(&trans, c, 0, 0);
303 	pr_info("inserting test keys");
305 	for (i = 0; i < nr; i += 16) {
306 		struct bkey_i_cookie k;
308 		bkey_cookie_init(&k.k_i);
		/* extent [i + 8, i + 16): second half of each 16-sector stride */
309 		k.k.p.offset = i + 16;
310 		k.k.p.snapshot = U32_MAX;
313 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
316 			bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
321 	pr_info("iterating forwards");
325 	for_each_btree_key(&trans, iter, BTREE_ID_extents,
326 			   SPOS(0, 0, U32_MAX), 0, k, ret) {
327 		BUG_ON(bkey_start_offset(k.k) != i + 8);
328 		BUG_ON(k.k->size != 8);
331 	bch2_trans_iter_exit(&trans, &iter);
335 	pr_info("iterating forwards by slots");
339 	for_each_btree_key(&trans, iter, BTREE_ID_extents,
341 			   BTREE_ITER_SLOTS, k, ret) {
		/* slots at stride starts (i % 16 == 0) are the holes */
342 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
344 		BUG_ON(bkey_start_offset(k.k) != i);
345 		BUG_ON(k.k->size != 8);
351 	bch2_trans_iter_exit(&trans, &iter);
353 	bch2_trans_exit(&trans);
358 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * Unit test: peek on an empty xattrs btree, twice, to verify that peeking
 * past the end behaves the same on repeated calls.
 * NOTE(review): the BUG_ON()s checking that no key is returned appear to be
 * elided from this view, as is the return.
 */
361 static int test_peek_end(struct bch_fs *c, u64 nr)
363 	struct btree_trans trans;
364 	struct btree_iter iter;
367 	bch2_trans_init(&trans, c, 0, 0);
368 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
369 			     SPOS(0, 0, U32_MAX), 0);
371 	k = bch2_btree_iter_peek(&iter);
374 	k = bch2_btree_iter_peek(&iter);
377 	bch2_trans_iter_exit(&trans, &iter);
378 	bch2_trans_exit(&trans);
/*
 * Same as test_peek_end(), but on the extents btree — extents iterators have
 * a separate peek path, so the empty-btree case is checked there too.
 * NOTE(review): the BUG_ON()s on the peeked keys and the return are elided
 * from this view.
 */
382 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
384 	struct btree_trans trans;
385 	struct btree_iter iter;
388 	bch2_trans_init(&trans, c, 0, 0);
389 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
390 			     SPOS(0, 0, U32_MAX), 0);
392 	k = bch2_btree_iter_peek(&iter);
395 	k = bch2_btree_iter_peek(&iter);
398 	bch2_trans_iter_exit(&trans, &iter);
399 	bch2_trans_exit(&trans);
403 /* extent unit tests */
/*
 * Helper for the extent-overwrite tests: insert a cookie extent covering
 * [start, end) — presumably the elided parameters on the signature line are
 * `u64 start, u64 end`; confirm against the full file.  Each extent gets a
 * monotonically increasing version so overwrite ordering is well defined.
 */
407 static int insert_test_extent(struct bch_fs *c,
410 	struct bkey_i_cookie k;
413 	bkey_cookie_init(&k.k_i);
414 	k.k_i.k.p.offset = end;
415 	k.k_i.k.p.snapshot = U32_MAX;
416 	k.k_i.k.size = end - start;
	/* bump the version so later inserts win over earlier ones */
417 	k.k_i.k.version.lo = test_version++;
419 	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
422 		bch_err(c, "insert error in insert_test_extent: %i", ret);
/*
 * Insert extent e1 = [e1_start, e1_end), then overwrite it (fully or
 * partially) with e2 = [e2_start, e2_end).  The verification of the
 * resulting btree contents is elided from this view.
 */
426 static int __test_extent_overwrite(struct bch_fs *c,
427 				    u64 e1_start, u64 e1_end,
428 				    u64 e2_start, u64 e2_end)
432 	ret = insert_test_extent(c, e1_start, e1_end) ?:
433 		insert_test_extent(c, e2_start, e2_end);
/* Overwrite the front of an existing extent (overlap at the start). */
439 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
441 	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
442 		__test_extent_overwrite(c, 8, 64, 0, 32);
/* Overwrite the back of an existing extent (overlap at, or past, the end). */
445 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
447 	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
448 		__test_extent_overwrite(c, 0, 64, 32, 72);
/* Overwrite the middle of an existing extent, splitting it in two. */
451 static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
453 	return __test_extent_overwrite(c, 0, 64, 32, 40);
/* Completely overwrite an existing extent, with various relative bounds. */
456 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
458 	return  __test_extent_overwrite(c, 32, 64,  0, 64) ?:
459 		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
460 		__test_extent_overwrite(c, 32, 64, 32, 64) ?:
461 		__test_extent_overwrite(c, 32, 64, 32, 128);
464 /* snapshot unit tests */
466 /* Test skipping over keys in unrelated snapshots: */
/*
 * Insert a key in snapshot @snapid_hi, then iterate from snapshot
 * @snapid_lo: the peek must skip the unrelated snapshot's key and return the
 * one in snapshot U32_MAX instead (the BUG_ON at the end checks this).
 * NOTE(review): the insert of the U32_MAX-snapshot key this relies on is not
 * visible in this view — presumably elided; confirm against the full file.
 */
467 static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
469 	struct btree_trans trans;
470 	struct btree_iter iter;
472 	struct bkey_i_cookie cookie;
475 	bkey_cookie_init(&cookie.k_i);
476 	cookie.k.p.snapshot = snapid_hi;
477 	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
482 	bch2_trans_init(&trans, c, 0, 0);
483 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
484 			     SPOS(0, 0, snapid_lo), 0);
485 	k = bch2_btree_iter_peek(&iter);
	/* must not see snapid_hi's key from an unrelated snapshot */
487 	BUG_ON(k.k->p.snapshot != U32_MAX);
489 	bch2_trans_iter_exit(&trans, &iter);
490 	bch2_trans_exit(&trans);
/*
 * Snapshot unit test driver: insert a key in snapshot U32_MAX, create two
 * child snapshot nodes under it, then run test_snapshot_filter() with the
 * lower/higher of the two new snapshot IDs.
 * NOTE(review): the declaration of snapids[] and several error checks are
 * elided from this view; snapid_subvols is visibly initialized but its use
 * (presumably passed to bch2_snapshot_node_create()) is not shown.
 */
494 static int test_snapshots(struct bch_fs *c, u64 nr)
496 	struct bkey_i_cookie cookie;
498 	u32 snapid_subvols[2] = { 1, 1 };
501 	bkey_cookie_init(&cookie.k_i);
502 	cookie.k.p.snapshot = U32_MAX;
503 	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
508 	ret = bch2_trans_do(c, NULL, NULL, 0,
509 		bch2_snapshot_node_create(&trans, U32_MAX,
	/* normalize so snapids[0] is always the lower snapshot ID */
516 	if (snapids[0] > snapids[1])
517 		swap(snapids[0], snapids[1]);
519 	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
521 		bch_err(c, "err %i from test_snapshot_filter", ret);
/*
 * Return a pseudo-random u64 for the randomized perf tests.
 * NOTE(review): the declaration of @v and the return expression are elided
 * from this view.
 */
530 static u64 test_rand(void)
536 	prandom_bytes(&v, sizeof(v));
/*
 * Perf test: insert @nr cookie keys at random offsets into the xattrs btree,
 * one key per transaction commit.
 * NOTE(review): the `if (ret) break;`-style error handling and the return
 * are elided from this view.
 */
541 static int rand_insert(struct bch_fs *c, u64 nr)
543 	struct btree_trans trans;
544 	struct bkey_i_cookie k;
548 	bch2_trans_init(&trans, c, 0, 0);
550 	for (i = 0; i < nr; i++) {
551 		bkey_cookie_init(&k.k_i);
552 		k.k.p.offset = test_rand();
553 		k.k.p.snapshot = U32_MAX;
555 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
556 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
558 			bch_err(c, "error in rand_insert: %i", ret);
563 	bch2_trans_exit(&trans);
/*
 * Perf test: like rand_insert(), but batches 8 random-offset inserts into a
 * single transaction commit, chaining them with ?: so the first failure
 * short-circuits the rest.
 * NOTE(review): declarations of i/j/ret, error handling and the return are
 * elided from this view.
 */
567 static int rand_insert_multi(struct bch_fs *c, u64 nr)
569 	struct btree_trans trans;
570 	struct bkey_i_cookie k[8];
575 	bch2_trans_init(&trans, c, 0, 0);
577 	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
578 		for (j = 0; j < ARRAY_SIZE(k); j++) {
579 			bkey_cookie_init(&k[j].k_i);
580 			k[j].k.p.offset = test_rand();
581 			k[j].k.p.snapshot = U32_MAX;
		/* all 8 inserts commit (or restart/fail) as one transaction */
584 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
585 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
586 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
587 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
588 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
589 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
590 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
591 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
592 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
594 			bch_err(c, "error in rand_insert_multi: %i", ret);
599 	bch2_trans_exit(&trans);
/*
 * Perf test: @nr random point lookups in the xattrs btree, reusing a single
 * iterator by repositioning it with bch2_btree_iter_set_pos() each round.
 * NOTE(review): the extraction of @ret from the peeked key (bkey_err) and
 * the return are elided from this view.
 */
603 static int rand_lookup(struct bch_fs *c, u64 nr)
605 	struct btree_trans trans;
606 	struct btree_iter iter;
611 	bch2_trans_init(&trans, c, 0, 0);
612 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
613 			     SPOS(0, 0, U32_MAX), 0);
615 	for (i = 0; i < nr; i++) {
616 		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
618 		k = bch2_btree_iter_peek(&iter);
621 			bch_err(c, "error in rand_lookup: %i", ret);
626 	bch2_trans_iter_exit(&trans, &iter);
627 	bch2_trans_exit(&trans);
/*
 * One iteration of the rand_mixed workload, run inside a transaction retry
 * loop: look up a random position, and on every 4th iteration (i & 3 == 0)
 * that found a key, overwrite it with a fresh cookie at the iterator's
 * position.  -EINTR (transaction restart) is expected and not logged.
 * NOTE(review): the trailing parameters (presumably `u64 i, u64 pos`) and
 * the return are elided from this view.
 */
631 static int rand_mixed_trans(struct btree_trans *trans,
632 			    struct btree_iter *iter,
633 			    struct bkey_i_cookie *cookie,
639 	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
641 	k = bch2_btree_iter_peek(iter);
	/* -EINTR just means "retry the transaction" — not a real error */
643 	if (ret && ret != -EINTR)
644 		bch_err(trans->c, "lookup error in rand_mixed: %i", ret);
648 	if (!(i & 3) && k.k) {
649 		bkey_cookie_init(&cookie->k_i);
650 		cookie->k.p = iter->pos;
651 		ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
/*
 * Perf test: @nr mixed random lookups/updates via rand_mixed_trans(), each
 * wrapped in __bch2_trans_do() so restarts are retried.
 * NOTE(review): the computation of `rand` (presumably test_rand()), error
 * handling and the return are elided from this view.
 */
657 static int rand_mixed(struct bch_fs *c, u64 nr)
659 	struct btree_trans trans;
660 	struct btree_iter iter;
661 	struct bkey_i_cookie cookie;
665 	bch2_trans_init(&trans, c, 0, 0);
666 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
667 			     SPOS(0, 0, U32_MAX), 0);
669 	for (i = 0; i < nr; i++) {
671 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
672 			rand_mixed_trans(&trans, &iter, &cookie, i, rand));
674 			bch_err(c, "update error in rand_mixed: %i", ret);
679 	bch2_trans_iter_exit(&trans, &iter);
680 	bch2_trans_exit(&trans);
/*
 * Delete the first key at or after @pos in the xattrs btree, inside the
 * caller's transaction.  If the peek finds nothing (or errors), the
 * (elided) checks between peek and delete presumably skip the delete.
 */
684 static int __do_delete(struct btree_trans *trans, struct bpos pos)
686 	struct btree_iter iter;
690 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
692 	k = bch2_btree_iter_peek(&iter);
700 	ret = bch2_btree_delete_at(trans, &iter, 0);
	/* always release the iterator, success or failure */
702 	bch2_trans_iter_exit(trans, &iter);
/*
 * Perf test: @nr deletions at random positions, each committed via
 * __bch2_trans_do() around __do_delete().
 * NOTE(review): error handling and the return are elided from this view.
 */
706 static int rand_delete(struct bch_fs *c, u64 nr)
708 	struct btree_trans trans;
712 	bch2_trans_init(&trans, c, 0, 0);
714 	for (i = 0; i < nr; i++) {
715 		struct bpos pos = SPOS(0, test_rand(), U32_MAX);
717 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
718 			__do_delete(&trans, pos));
720 			bch_err(c, "error in rand_delete: %i", ret);
725 	bch2_trans_exit(&trans);
/*
 * Perf test: walk the xattrs btree by slots with an intent lock and insert a
 * cookie into every slot, stopping after (elided) @nr insertions.
 * NOTE(review): the insertion counter, error handling and the return are
 * elided from this view.
 */
729 static int seq_insert(struct bch_fs *c, u64 nr)
731 	struct btree_trans trans;
732 	struct btree_iter iter;
734 	struct bkey_i_cookie insert;
738 	bkey_cookie_init(&insert.k_i);
740 	bch2_trans_init(&trans, c, 0, 0);
742 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX),
743 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		/* insert at whatever slot the iterator is currently on */
744 		insert.k.p = iter.pos;
746 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
747 			bch2_btree_iter_traverse(&iter) ?:
748 			bch2_trans_update(&trans, &iter, &insert.k_i, 0));
750 			bch_err(c, "error in seq_insert: %i", ret);
757 	bch2_trans_iter_exit(&trans, &iter);
759 	bch2_trans_exit(&trans);
/*
 * Perf test: sequentially iterate every key in the xattrs btree, doing
 * nothing with each key — measures pure iteration cost.
 * NOTE(review): the (empty) loop body and the return are elided from this
 * view.
 */
763 static int seq_lookup(struct bch_fs *c, u64 nr)
765 	struct btree_trans trans;
766 	struct btree_iter iter;
770 	bch2_trans_init(&trans, c, 0, 0);
772 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
773 			   SPOS(0, 0, U32_MAX), 0, k, ret)
775 	bch2_trans_iter_exit(&trans, &iter);
777 	bch2_trans_exit(&trans);
/*
 * Perf test: iterate the xattrs btree with intent locks and rewrite each key
 * in place — bkey_reassemble() copies the existing key, which is then
 * re-committed unchanged via bch2_trans_update().
 * NOTE(review): error handling and the return are elided from this view.
 */
781 static int seq_overwrite(struct bch_fs *c, u64 nr)
783 	struct btree_trans trans;
784 	struct btree_iter iter;
788 	bch2_trans_init(&trans, c, 0, 0);
790 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
792 			   BTREE_ITER_INTENT, k, ret) {
793 		struct bkey_i_cookie u;
		/* copy the on-disk key so we can write it back verbatim */
795 		bkey_reassemble(&u.k_i, k);
797 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
798 			bch2_btree_iter_traverse(&iter) ?:
799 			bch2_trans_update(&trans, &iter, &u.k_i, 0));
801 			bch_err(c, "error in seq_overwrite: %i", ret);
805 	bch2_trans_iter_exit(&trans, &iter);
807 	bch2_trans_exit(&trans);
/*
 * Perf test: bulk-delete the whole xattrs btree with a single ranged delete
 * (the counterpart teardown for the seq_* workloads).
 * NOTE(review): the trailing arguments to bch2_btree_delete_range() and the
 * return are elided from this view.
 */
811 static int seq_delete(struct bch_fs *c, u64 nr)
815 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
816 				      SPOS(0, 0, U32_MAX), SPOS_MAX,
819 		bch_err(c, "error in seq_delete: %i", ret);
/* Signature shared by all unit/perf tests above: (fs, nr iterations). */
823 typedef int (*perf_test_fn)(struct bch_fs *, u64);
832 wait_queue_head_t ready_wait;
835 struct completion done_completion;
/*
 * Worker thread body for bch2_btree_perf_test().  All threads rendezvous on
 * j->ready: the last thread to arrive records the start timestamp and wakes
 * the rest, so every thread begins the workload simultaneously.  Each thread
 * runs its share (nr / nr_threads) of the work; the last thread to finish
 * records the end timestamp and signals completion.
 * NOTE(review): declarations (e.g. `int ret;`) and the return are elided
 * from this view.
 */
842 static int btree_perf_test_thread(void *data)
844 	struct test_job *j = data;
	/* last thread to become ready starts the clock and releases everyone */
847 	if (atomic_dec_and_test(&j->ready)) {
848 		wake_up(&j->ready_wait);
849 		j->start = sched_clock();
851 		wait_event(j->ready_wait, !atomic_read(&j->ready));
854 	ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
856 		bch_err(j->c, "%ps: error %i", j->fn, ret);
	/* last thread to finish stops the clock and signals the waiter */
860 	if (atomic_dec_and_test(&j->done)) {
861 		j->finish = sched_clock();
862 		complete(&j->done_completion);
/*
 * Entry point (from sysfs) for running a named unit/perf test: resolve
 * @testname to one of the test functions above, run it on @nr_threads
 * kthreads (this thread counts as one of them — see the direct
 * btree_perf_test_thread(&j) call), then report elapsed time, per-iteration
 * cost and throughput.
 * NOTE(review): declarations (name_buf, time, i, ret), the unknown-test
 * early return after the pr_err, and the final return are elided from this
 * view.
 */
868 int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
869 			 u64 nr, unsigned nr_threads)
871 	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
873 	struct printbuf nr_buf = PRINTBUF;
874 	struct printbuf per_sec_buf = PRINTBUF;
878 	atomic_set(&j.ready, nr_threads);
879 	init_waitqueue_head(&j.ready_wait);
881 	atomic_set(&j.done, nr_threads);
882 	init_completion(&j.done_completion);
/* map a test name string to its function pointer */
884 #define perf_test(_test)				\
885 	if (!strcmp(testname, #_test)) j.fn = _test
887 	perf_test(rand_insert);
888 	perf_test(rand_insert_multi);
889 	perf_test(rand_lookup);
890 	perf_test(rand_mixed);
891 	perf_test(rand_delete);
893 	perf_test(seq_insert);
894 	perf_test(seq_lookup);
895 	perf_test(seq_overwrite);
896 	perf_test(seq_delete);
898 	/* a unit test, not a perf test: */
899 	perf_test(test_delete);
900 	perf_test(test_delete_written);
901 	perf_test(test_iterate);
902 	perf_test(test_iterate_extents);
903 	perf_test(test_iterate_slots);
904 	perf_test(test_iterate_slots_extents);
905 	perf_test(test_peek_end);
906 	perf_test(test_peek_end_extents);
908 	perf_test(test_extent_overwrite_front);
909 	perf_test(test_extent_overwrite_back);
910 	perf_test(test_extent_overwrite_middle);
911 	perf_test(test_extent_overwrite_all);
913 	perf_test(test_snapshots);
916 		pr_err("unknown test %s", testname);
920 	//pr_info("running test %s:", testname);
	/* the calling thread participates as one of the nr_threads workers */
923 	btree_perf_test_thread(&j);
925 	for (i = 0; i < nr_threads; i++)
926 		kthread_run(btree_perf_test_thread, &j,
927 			    "bcachefs perf test[%u]", i);
	/* keep waiting through signals until all workers have finished */
929 	while (wait_for_completion_interruptible(&j.done_completion))
932 	time = j.finish - j.start;
934 	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
935 	prt_human_readable_u64(&nr_buf, nr);
936 	prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
937 	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
938 		name_buf, nr_buf.buf, nr_threads,
939 		div_u64(time, NSEC_PER_SEC),
940 		div_u64(time * nr_threads, nr),
942 	printbuf_exit(&per_sec_buf);
943 	printbuf_exit(&nr_buf);
947 #endif /* CONFIG_BCACHEFS_TESTS */