1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
10 #include "linux/kthread.h"
11 #include "linux/random.h"
/*
 * delete_test_keys() - reset the keyspace before a test run.
 *
 * Deletes everything in [SPOS(0,0,U32_MAX), SPOS_MAX) in both the extents
 * and xattrs btrees so each test starts from an empty tree.
 * NOTE(review): this view is elided — the delete flags, error checks and
 * the BUG_ON()s between/after the two calls are not shown here.
 */
13 static void delete_test_keys(struct bch_fs *c)
17 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
18 				      SPOS(0, 0, U32_MAX), SPOS_MAX,
23 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
24 				      SPOS(0, 0, U32_MAX), SPOS_MAX,
/*
 * test_delete() - unit test: delete a key through the same iterator twice.
 *
 * Inserts a single cookie key at pos (0,0,snapshot=U32_MAX) in the xattrs
 * btree via an INTENT iterator, then deletes at that position twice; the
 * second delete exercises deleting at a position whose key is already gone.
 * @nr is unused by this test (present to match the perf_test_fn signature).
 */
31 static int test_delete(struct bch_fs *c, u64 nr)
33 	struct btree_trans trans;
34 	struct btree_iter iter;
35 	struct bkey_i_cookie k;
38 	bkey_cookie_init(&k.k_i);
	/* keys in snapshot btrees must carry an explicit snapshot ID */
39 	k.k.p.snapshot = U32_MAX;
41 	bch2_trans_init(&trans, c, 0, 0);
	/* iter init flags (BTREE_ITER_INTENT) are on an elided line below */
42 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
	/* traverse first so the update path sees a valid, positioned iter */
45 	ret   = commit_do(&trans, NULL, NULL, 0,
46 		bch2_btree_iter_traverse(&iter) ?:
47 		bch2_trans_update(&trans, &iter, &k.k_i, 0));
49 		bch_err(c, "update error in test_delete: %s", bch2_err_str(ret));
53 	pr_info("deleting once");
54 	ret = commit_do(&trans, NULL, NULL, 0,
55 		bch2_btree_iter_traverse(&iter) ?:
56 		bch2_btree_delete_at(&trans, &iter, 0));
58 		bch_err(c, "delete error (first) in test_delete: %s", bch2_err_str(ret));
	/* second delete at the same position must also succeed (no-op delete) */
62 	pr_info("deleting twice");
63 	ret = commit_do(&trans, NULL, NULL, 0,
64 		bch2_btree_iter_traverse(&iter) ?:
65 		bch2_btree_delete_at(&trans, &iter, 0));
67 		bch_err(c, "delete error (second) in test_delete: %s", bch2_err_str(ret));
71 	bch2_trans_iter_exit(&trans, &iter);
72 	bch2_trans_exit(&trans);
/*
 * test_delete_written() - unit test: delete a key after it hit the btree.
 *
 * Like test_delete(), but flushes all journal pins between the insert and
 * the delete so the key has been written out of the journal into the btree
 * node before being deleted — exercises the "delete a written key" path.
 */
76 static int test_delete_written(struct bch_fs *c, u64 nr)
78 	struct btree_trans trans;
79 	struct btree_iter iter;
80 	struct bkey_i_cookie k;
83 	bkey_cookie_init(&k.k_i);
84 	k.k.p.snapshot = U32_MAX;
86 	bch2_trans_init(&trans, c, 0, 0);
88 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
91 	ret = commit_do(&trans, NULL, NULL, 0,
92 		bch2_btree_iter_traverse(&iter) ?:
93 		bch2_trans_update(&trans, &iter, &k.k_i, 0));
95 		bch_err(c, "update error in test_delete_written: %s", bch2_err_str(ret));
	/* unlock before flushing: journal reclaim may need btree locks */
99 	bch2_trans_unlock(&trans);
100 	bch2_journal_flush_all_pins(&c->journal);
102 	ret = commit_do(&trans, NULL, NULL, 0,
103 		bch2_btree_iter_traverse(&iter) ?:
104 		bch2_btree_delete_at(&trans, &iter, 0));
106 		bch_err(c, "delete error in test_delete_written: %s", bch2_err_str(ret));
110 	bch2_trans_iter_exit(&trans, &iter);
111 	bch2_trans_exit(&trans);
/*
 * test_iterate() - unit test: forward and reverse iteration over @nr keys.
 *
 * Inserts @nr cookie keys at offsets 0..nr-1 in the xattrs btree, then
 * iterates forwards checking offsets appear in ascending order (i counts
 * up), and backwards checking they reappear in descending order (i counts
 * down).  NOTE(review): the k.k.p.offset = i assignment in the insert loop
 * is on an elided line; inferred from the BUG_ON()s below — confirm.
 */
115 static int test_iterate(struct bch_fs *c, u64 nr)
117 	struct btree_trans trans;
118 	struct btree_iter iter = { NULL };
123 	bch2_trans_init(&trans, c, 0, 0);
127 	pr_info("inserting test keys");
129 	for (i = 0; i < nr; i++) {
130 		struct bkey_i_cookie k;
132 		bkey_cookie_init(&k.k_i);
134 		k.k.p.snapshot = U32_MAX;
136 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
139 			bch_err(c, "insert error in test_iterate: %s", bch2_err_str(ret));
144 	pr_info("iterating forwards");
	/* every key must be seen exactly once, in order: offsets 0,1,2,... */
148 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
149 				  SPOS(0, 0, U32_MAX), 0, k, ({
150 		BUG_ON(k.k->p.offset != i++);
154 		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
160 	pr_info("iterating backwards");
	/* reverse pass: i counts back down from nr to 0 */
162 	ret = for_each_btree_key_reverse(&trans, iter, BTREE_ID_xattrs,
163 				  SPOS(0, U64_MAX, U32_MAX), 0, k,
165 		BUG_ON(k.k->p.offset != --i);
169 		bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
175 	bch2_trans_iter_exit(&trans, &iter);
176 	bch2_trans_exit(&trans);
/*
 * test_iterate_extents() - unit test: iteration over contiguous extents.
 *
 * Inserts nr/8 size-8 cookie extents covering offsets [0, nr) in the
 * extents btree (extent at [i, i+8) has pos offset i+8, since an extent's
 * pos is its end).  Forward iteration checks each extent starts where the
 * previous one ended; reverse iteration checks the same going backwards.
 */
180 static int test_iterate_extents(struct bch_fs *c, u64 nr)
182 	struct btree_trans trans;
183 	struct btree_iter iter = { NULL };
188 	bch2_trans_init(&trans, c, 0, 0);
192 	pr_info("inserting test extents");
194 	for (i = 0; i < nr; i += 8) {
195 		struct bkey_i_cookie k;
197 		bkey_cookie_init(&k.k_i);
		/* extent pos = end of the extent; size is set on an elided line */
198 		k.k.p.offset = i + 8;
199 		k.k.p.snapshot = U32_MAX;
202 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
205 			bch_err(c, "insert error in test_iterate_extents: %s", bch2_err_str(ret));
210 	pr_info("iterating forwards");
	/* each extent must begin exactly at the running offset i */
214 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
215 				  SPOS(0, 0, U32_MAX), 0, k, ({
216 		BUG_ON(bkey_start_offset(k.k) != i);
221 		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
227 	pr_info("iterating backwards");
	/* reverse: extent must end at i, then i moves back to its start */
229 	ret = for_each_btree_key_reverse(&trans, iter, BTREE_ID_extents,
230 				  SPOS(0, U64_MAX, U32_MAX), 0, k,
232 		BUG_ON(k.k->p.offset != i);
233 		i = bkey_start_offset(k.k);
237 		bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
243 	bch2_trans_iter_exit(&trans, &iter);
244 	bch2_trans_exit(&trans);
/*
 * test_iterate_slots() - unit test: BTREE_ITER_SLOTS over sparse keys.
 *
 * Inserts cookie keys at even offsets (0, 2, 4, ...) in the xattrs btree.
 * A plain forward iteration must return only the even offsets; a SLOTS
 * iteration must return every slot, with odd offsets showing up as
 * deleted/whiteout keys (bkey_deleted(k.k) true exactly when i is odd).
 */
248 static int test_iterate_slots(struct bch_fs *c, u64 nr)
250 	struct btree_trans trans;
251 	struct btree_iter iter = { NULL };
256 	bch2_trans_init(&trans, c, 0, 0);
260 	pr_info("inserting test keys");
262 	for (i = 0; i < nr; i++) {
263 		struct bkey_i_cookie k;
265 		bkey_cookie_init(&k.k_i);
		/* keys only at even offsets, leaving odd slots empty */
266 		k.k.p.offset = i * 2;
267 		k.k.p.snapshot = U32_MAX;
269 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
272 			bch_err(c, "insert error in test_iterate_slots: %s", bch2_err_str(ret));
277 	pr_info("iterating forwards");
	/* key iteration skips holes: only even offsets should appear */
281 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
282 				  SPOS(0, 0, U32_MAX), 0, k, ({
283 		BUG_ON(k.k->p.offset != i);
288 		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
294 	pr_info("iterating forwards by slots");
	/* slot iteration visits every offset; odd slots must read as deleted */
298 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
300 				  BTREE_ITER_SLOTS, k, ({
304 		BUG_ON(k.k->p.offset != i);
305 		BUG_ON(bkey_deleted(k.k) != (i & 1));
311 		bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
316 	bch2_trans_exit(&trans);
/*
 * test_iterate_slots_extents() - unit test: SLOTS iteration over extents.
 *
 * Inserts size-8 cookie extents at [i+8, i+16) for i = 0, 16, 32, ...,
 * leaving an 8-sector hole before each one.  Plain iteration must see only
 * the real extents; SLOTS iteration must also synthesize the holes as
 * deleted extents (deleted exactly when i is a multiple of 16).
 */
320 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
322 	struct btree_trans trans;
323 	struct btree_iter iter = { NULL };
328 	bch2_trans_init(&trans, c, 0, 0);
332 	pr_info("inserting test keys");
334 	for (i = 0; i < nr; i += 16) {
335 		struct bkey_i_cookie k;
337 		bkey_cookie_init(&k.k_i);
		/* extent ends at i+16; its size (8, set on an elided line)
		 * makes it cover [i+8, i+16), leaving [i, i+8) as a hole */
338 		k.k.p.offset = i + 16;
339 		k.k.p.snapshot = U32_MAX;
342 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
345 			bch_err(c, "insert error in test_iterate_slots_extents: %s", bch2_err_str(ret));
350 	pr_info("iterating forwards");
354 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
355 				  SPOS(0, 0, U32_MAX), 0, k, ({
356 		BUG_ON(bkey_start_offset(k.k) != i + 8);
357 		BUG_ON(k.k->size != 8);
362 		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
368 	pr_info("iterating forwards by slots");
	/* slots pass: holes come back as deleted extents of the same size */
372 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
374 				 BTREE_ITER_SLOTS, k, ({
377 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
379 		BUG_ON(bkey_start_offset(k.k) != i);
380 		BUG_ON(k.k->size != 8);
385 		bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
390 	bch2_trans_exit(&trans);
395 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * test_peek_end() - unit test: peeking an empty btree returns no key.
 *
 * Peeks twice at the start of an empty xattrs btree; both peeks must come
 * back empty (the BUG_ON(k.k) checks after each peek are on elided lines).
 * The double peek guards against the iterator caching stale state.
 */
398 static int test_peek_end(struct bch_fs *c, u64 nr)
400 	struct btree_trans trans;
401 	struct btree_iter iter;
404 	bch2_trans_init(&trans, c, 0, 0);
405 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
406 			     SPOS(0, 0, U32_MAX), 0);
408 	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
411 	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
414 	bch2_trans_iter_exit(&trans, &iter);
415 	bch2_trans_exit(&trans);
/*
 * test_peek_end_extents() - same as test_peek_end(), but on the extents
 * btree, which has different peek semantics (extent keys span ranges).
 */
419 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
421 	struct btree_trans trans;
422 	struct btree_iter iter;
425 	bch2_trans_init(&trans, c, 0, 0);
426 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
427 			     SPOS(0, 0, U32_MAX), 0);
429 	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
432 	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
435 	bch2_trans_iter_exit(&trans, &iter);
436 	bch2_trans_exit(&trans);
440 /* extent unit tests */
/*
 * insert_test_extent() - insert one cookie extent covering [start, end).
 *
 * Each call stamps the key with a monotonically increasing version
 * (test_version, declared on an elided line) so overwrite ordering is
 * well-defined when extents overlap.  Returns the insert's error code.
 */
444 static int insert_test_extent(struct bch_fs *c,
447 	struct bkey_i_cookie k;
450 	bkey_cookie_init(&k.k_i);
451 	k.k_i.k.p.offset = end;
452 	k.k_i.k.p.snapshot = U32_MAX;
453 	k.k_i.k.size = end - start;
	/* newer versions win on overlap; ties would be ambiguous */
454 	k.k_i.k.version.lo = test_version++;
456 	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
459 		bch_err(c, "insert error in insert_test_extent: %s", bch2_err_str(ret));
/*
 * __test_extent_overwrite() - insert extent e1, then overwrite it with e2.
 *
 * Common helper for the extent-overwrite tests below; the post-overwrite
 * verification / cleanup (delete_test_keys()) is on elided lines.
 */
463 static int __test_extent_overwrite(struct bch_fs *c,
464 				    u64 e1_start, u64 e1_end,
465 				    u64 e2_start, u64 e2_end)
469 	ret   = insert_test_extent(c, e1_start, e1_end) ?:
470 		insert_test_extent(c, e2_start, e2_end);
/*
 * Extent-overwrite scenarios, one wrapper per geometry.  Arguments are
 * (e1_start, e1_end, e2_start, e2_end): e2 is inserted second and
 * overwrites part (or all) of e1.
 */

/* e2 overlaps the front of e1 (aligned and unaligned starts) */
476 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
478 	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
479 		__test_extent_overwrite(c, 8, 64, 0, 32);

/* e2 overlaps the back of e1 (ending at and past e1's end) */
482 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
484 	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
485 		__test_extent_overwrite(c, 0, 64, 32, 72);

/* e2 lands strictly inside e1, splitting it in two */
488 static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
490 	return __test_extent_overwrite(c, 0, 64, 32, 40);

/* e2 completely covers e1, for all four start/end alignments */
493 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
495 	return  __test_extent_overwrite(c, 32, 64,  0, 64) ?:
496 		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
497 		__test_extent_overwrite(c, 32, 64, 32, 64) ?:
498 		__test_extent_overwrite(c, 32, 64, 32, 128);
501 /* snapshot unit tests */
503 /* Test skipping over keys in unrelated snapshots: */
/*
 * test_snapshot_filter() - check iteration skips keys in unrelated
 * snapshots.
 *
 * @snapid_lo and @snapid_hi are sibling snapshots (neither is an ancestor
 * of the other).  A cookie is inserted in @snapid_hi; iterating from
 * @snapid_lo must not see it, and instead finds the shared ancestor key at
 * snapshot U32_MAX (inserted by the caller, test_snapshots()).
 */
504 static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
506 	struct btree_trans trans;
507 	struct btree_iter iter;
509 	struct bkey_i_cookie cookie;
512 	bkey_cookie_init(&cookie.k_i);
513 	cookie.k.p.snapshot = snapid_hi;
514 	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
519 	bch2_trans_init(&trans, c, 0, 0);
520 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
521 			     SPOS(0, 0, snapid_lo), 0);
522 	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
	/* snapid_hi's key is invisible from snapid_lo; only the U32_MAX
	 * ancestor key may be returned */
524 	BUG_ON(k.k->p.snapshot != U32_MAX);
526 	bch2_trans_iter_exit(&trans, &iter);
527 	bch2_trans_exit(&trans);
/*
 * test_snapshots() - snapshot unit-test driver.
 *
 * Inserts a cookie at snapshot U32_MAX, creates two child snapshots of
 * U32_MAX (snapids[], filled by bch2_snapshot_node_create via elided
 * arguments), orders them, and runs test_snapshot_filter() with the lower
 * ID as the reader and the higher as the writer.
 */
531 static int test_snapshots(struct bch_fs *c, u64 nr)
533 	struct bkey_i_cookie cookie;
535 	u32 snapid_subvols[2] = { 1, 1 };
538 	bkey_cookie_init(&cookie.k_i);
539 	cookie.k.p.snapshot = U32_MAX;
540 	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
545 	ret = bch2_trans_do(c, NULL, NULL, 0,
546 		      bch2_snapshot_node_create(&trans, U32_MAX,
	/* normalize ordering: filter test expects snapids[0] < snapids[1] */
553 	if (snapids[0] > snapids[1])
554 		swap(snapids[0], snapids[1]);
556 	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
558 		bch_err(c, "err from test_snapshot_filter: %s", bch2_err_str(ret));
/*
 * test_rand() - pseudo-random u64 for the randomized tests.
 * NOTE(review): prandom_bytes() was deprecated upstream in favor of
 * get_random_bytes() — confirm against the target kernel version.
 * (The declaration of v and the return are on elided lines.)
 */
567 static u64 test_rand(void)
573 	prandom_bytes(&v, sizeof(v));
/*
 * rand_insert() - perf test: @nr single-key inserts at random offsets.
 *
 * Each insert is its own transaction commit; compare with
 * rand_insert_multi() which batches 8 inserts per commit.
 */
578 static int rand_insert(struct bch_fs *c, u64 nr)
580 	struct btree_trans trans;
581 	struct bkey_i_cookie k;
585 	bch2_trans_init(&trans, c, 0, 0);
587 	for (i = 0; i < nr; i++) {
588 		bkey_cookie_init(&k.k_i);
589 		k.k.p.offset = test_rand();
590 		k.k.p.snapshot = U32_MAX;
592 		ret = commit_do(&trans, NULL, NULL, 0,
593 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
595 			bch_err(c, "error in rand_insert: %s", bch2_err_str(ret));
600 	bch2_trans_exit(&trans);
/*
 * rand_insert_multi() - perf test: random inserts batched 8 per commit.
 *
 * Measures the benefit of amortizing transaction-commit overhead across
 * multiple updates; the 8 insert calls are chained with ?: so the commit
 * aborts on the first failure.
 */
604 static int rand_insert_multi(struct bch_fs *c, u64 nr)
606 	struct btree_trans trans;
607 	struct bkey_i_cookie k[8];
612 	bch2_trans_init(&trans, c, 0, 0);
614 	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
615 		for (j = 0; j < ARRAY_SIZE(k); j++) {
616 			bkey_cookie_init(&k[j].k_i);
617 			k[j].k.p.offset = test_rand();
618 			k[j].k.p.snapshot = U32_MAX;
		/* one commit covering all 8 keys; unrolled since commit_do()
		 * takes a single expression */
621 		ret = commit_do(&trans, NULL, NULL, 0,
622 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
623 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
624 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
625 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
626 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
627 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
628 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
629 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
631 			bch_err(c, "error in rand_insert_multi: %s", bch2_err_str(ret));
636 	bch2_trans_exit(&trans);
/*
 * rand_lookup() - perf test: @nr point lookups at random offsets.
 *
 * Reuses one iterator, repositioning it each iteration; the error check on
 * the peek result is on elided lines after the lockrestart_do().
 */
640 static int rand_lookup(struct bch_fs *c, u64 nr)
642 	struct btree_trans trans;
643 	struct btree_iter iter;
648 	bch2_trans_init(&trans, c, 0, 0);
649 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
650 			     SPOS(0, 0, U32_MAX), 0);
652 	for (i = 0; i < nr; i++) {
653 		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
655 		lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
658 			bch_err(c, "error in rand_lookup: %s", bch2_err_str(ret));
663 	bch2_trans_iter_exit(&trans, &iter);
664 	bch2_trans_exit(&trans);
/*
 * rand_mixed_trans() - one iteration of the mixed lookup/update workload.
 *
 * Peeks at the random position @pos; on every 4th call (i & 3 == 0) that
 * finds a key, overwrites it with a fresh cookie.  Runs inside commit_do()
 * from rand_mixed(), so transaction restarts are expected and not logged.
 */
668 static int rand_mixed_trans(struct btree_trans *trans,
669 			    struct btree_iter *iter,
670 			    struct bkey_i_cookie *cookie,
676 	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
678 	k = bch2_btree_iter_peek(iter);
	/* restarts are normal under commit_do(); only log real errors */
680 	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
681 		bch_err(trans->c, "lookup error in rand_mixed: %s", bch2_err_str(ret));
685 	if (!(i & 3) && k.k) {
686 		bkey_cookie_init(&cookie->k_i);
687 		cookie->k.p = iter->pos;
688 		ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
/*
 * rand_mixed() - perf test: @nr iterations of mixed lookups and updates.
 *
 * Driver for rand_mixed_trans(); the per-iteration random position (rand)
 * is drawn on an elided line inside the loop.
 */
694 static int rand_mixed(struct bch_fs *c, u64 nr)
696 	struct btree_trans trans;
697 	struct btree_iter iter;
698 	struct bkey_i_cookie cookie;
702 	bch2_trans_init(&trans, c, 0, 0);
703 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
704 			     SPOS(0, 0, U32_MAX), 0);
706 	for (i = 0; i < nr; i++) {
708 		ret = commit_do(&trans, NULL, NULL, 0,
709 			rand_mixed_trans(&trans, &iter, &cookie, i, rand));
711 			bch_err(c, "update error in rand_mixed: %s", bch2_err_str(ret));
716 	bch2_trans_iter_exit(&trans, &iter);
717 	bch2_trans_exit(&trans);
/*
 * __do_delete() - delete the first key at or after @pos, if any.
 *
 * Transaction helper for rand_delete(): peeks from @pos (the early-out
 * when no key is found is on elided lines) and deletes at the iterator.
 * Always tears down the iterator before returning.
 */
721 static int __do_delete(struct btree_trans *trans, struct bpos pos)
723 	struct btree_iter iter;
727 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
729 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
737 	ret = bch2_btree_delete_at(trans, &iter, 0);
739 	bch2_trans_iter_exit(trans, &iter);
/*
 * rand_delete() - perf test: @nr deletes at random positions.
 * Each delete is its own committed transaction via __do_delete().
 */
743 static int rand_delete(struct bch_fs *c, u64 nr)
745 	struct btree_trans trans;
749 	bch2_trans_init(&trans, c, 0, 0);
751 	for (i = 0; i < nr; i++) {
752 		struct bpos pos = SPOS(0, test_rand(), U32_MAX);
754 		ret = commit_do(&trans, NULL, NULL, 0,
755 			__do_delete(&trans, pos));
757 			bch_err(c, "error in rand_delete: %s", bch2_err_str(ret));
762 	bch2_trans_exit(&trans);
/*
 * seq_insert() - perf test: fill slots 0..nr-1 sequentially.
 *
 * Iterates empty slots (BTREE_ITER_SLOTS|INTENT) from the start of the
 * xattrs btree, inserting a cookie at each position and committing per
 * iteration, stopping once the offset reaches @nr (break is elided).
 */
766 static int seq_insert(struct bch_fs *c, u64 nr)
768 	struct btree_trans trans;
769 	struct btree_iter iter;
771 	struct bkey_i_cookie insert;
774 	bkey_cookie_init(&insert.k_i);
776 	bch2_trans_init(&trans, c, 0, 0);
778 	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
780 					BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
783 		if (iter.pos.offset >= nr)
785 		insert.k.p = iter.pos;
786 		bch2_trans_update(&trans, &iter, &insert.k_i, 0);
789 		bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
791 	bch2_trans_exit(&trans);
/*
 * seq_lookup() - perf test: one full sequential scan of the xattrs btree.
 * The loop body (elided) does no work per key; this measures raw
 * iteration cost.
 */
795 static int seq_lookup(struct bch_fs *c, u64 nr)
797 	struct btree_trans trans;
798 	struct btree_iter iter;
802 	bch2_trans_init(&trans, c, 0, 0);
804 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
805 				  SPOS(0, 0, U32_MAX), 0, k,
808 		bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
810 	bch2_trans_exit(&trans);
/*
 * seq_overwrite() - perf test: rewrite every existing key in place.
 *
 * Walks the xattrs btree with an INTENT iterator, reassembling each key
 * into a local copy and updating it unchanged — measures the overwrite /
 * commit path without changing the data set.
 */
814 static int seq_overwrite(struct bch_fs *c, u64 nr)
816 	struct btree_trans trans;
817 	struct btree_iter iter;
821 	bch2_trans_init(&trans, c, 0, 0);
823 	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
825 					BTREE_ITER_INTENT, k,
828 		struct bkey_i_cookie u;
		/* copy the existing key verbatim, then write it back */
830 		bkey_reassemble(&u.k_i, k);
831 		bch2_trans_update(&trans, &iter, &u.k_i, 0);
834 		bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
836 	bch2_trans_exit(&trans);
/*
 * seq_delete() - perf test: bulk-delete the whole xattrs keyspace with a
 * single ranged delete (the flags argument and return are elided).
 */
840 static int seq_delete(struct bch_fs *c, u64 nr)
844 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
845 				      SPOS(0, 0, U32_MAX), SPOS_MAX,
848 		bch_err(c, "error in seq_delete: %s", bch2_err_str(ret));
/* signature shared by every unit and perf test above */
852 typedef int (*perf_test_fn)(struct bch_fs *, u64);
/* NOTE(review): the struct test_job definition opening these fields is on
 * elided lines; ready_wait gates thread start, done_completion signals the
 * main thread when the last worker finishes. */
861 	wait_queue_head_t	ready_wait;
864 	struct completion	done_completion;
/*
 * btree_perf_test_thread() - per-thread body of a perf test run.
 *
 * All threads rendezvous on j->ready: the last one to arrive records the
 * start timestamp and wakes the rest, so timing excludes thread startup.
 * After running its share of the work (nr / nr_threads iterations), the
 * last thread to finish records the end time and completes the job.
 */
871 static int btree_perf_test_thread(void *data)
873 	struct test_job *j = data;
	/* last thread to become ready starts the clock and releases everyone */
876 	if (atomic_dec_and_test(&j->ready)) {
877 		wake_up(&j->ready_wait);
878 		j->start = sched_clock();
880 		wait_event(j->ready_wait, !atomic_read(&j->ready));
883 	ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
885 		bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
	/* last thread to finish stops the clock and signals the waiter */
889 	if (atomic_dec_and_test(&j->done)) {
890 		j->finish = sched_clock();
891 		complete(&j->done_completion);
/*
 * bch2_btree_perf_test() - entry point: run the named test with @nr
 * iterations spread across @nr_threads threads.
 *
 * Resolves @testname to a perf_test_fn via the perf_test() macro table,
 * runs it (the calling thread participates as one worker; the remaining
 * nr_threads - 1 are kthreads), then prints elapsed time, per-iteration
 * latency and throughput.  Returns 0 on success or an error code
 * (unknown-test / test-failure returns are on elided lines).
 */
897 int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
898 			 u64 nr, unsigned nr_threads)
900 	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
902 	struct printbuf nr_buf = PRINTBUF;
903 	struct printbuf per_sec_buf = PRINTBUF;
	/* both counters start at nr_threads; each worker decrements each once */
907 	atomic_set(&j.ready, nr_threads);
908 	init_waitqueue_head(&j.ready_wait);
910 	atomic_set(&j.done, nr_threads);
911 	init_completion(&j.done_completion);
913 #define perf_test(_test)				\
914 	if (!strcmp(testname, #_test)) j.fn = _test
916 	perf_test(rand_insert);
917 	perf_test(rand_insert_multi);
918 	perf_test(rand_lookup);
919 	perf_test(rand_mixed);
920 	perf_test(rand_delete);
922 	perf_test(seq_insert);
923 	perf_test(seq_lookup);
924 	perf_test(seq_overwrite);
925 	perf_test(seq_delete);
927 	/* a unit test, not a perf test: */
928 	perf_test(test_delete);
929 	perf_test(test_delete_written);
930 	perf_test(test_iterate);
931 	perf_test(test_iterate_extents);
932 	perf_test(test_iterate_slots);
933 	perf_test(test_iterate_slots_extents);
934 	perf_test(test_peek_end);
935 	perf_test(test_peek_end_extents);
937 	perf_test(test_extent_overwrite_front);
938 	perf_test(test_extent_overwrite_back);
939 	perf_test(test_extent_overwrite_middle);
940 	perf_test(test_extent_overwrite_all);
942 	perf_test(test_snapshots);
945 		pr_err("unknown test %s", testname);
949 	//pr_info("running test %s:", testname);
	/* calling thread is worker 0; spawn the remaining nr_threads - 1.
	 * NOTE(review): loop bound is visible as nr_threads here — the
	 * "i < nr_threads - 1" adjustment, if any, may be on elided lines */
952 		btree_perf_test_thread(&j);
954 	for (i = 0; i < nr_threads; i++)
955 		kthread_run(btree_perf_test_thread, &j,
956 			    "bcachefs perf test[%u]", i);
	/* retry across signals; the test must run to completion */
958 	while (wait_for_completion_interruptible(&j.done_completion))
961 	time = j.finish - j.start;
963 	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
964 	prt_human_readable_u64(&nr_buf, nr);
965 	prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
966 	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
967 		name_buf, nr_buf.buf, nr_threads,
968 		div_u64(time, NSEC_PER_SEC),
969 		div_u64(time * nr_threads, nr),
971 	printbuf_exit(&per_sec_buf);
972 	printbuf_exit(&nr_buf);
976 #endif /* CONFIG_BCACHEFS_TESTS */