1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
10 #include "linux/kthread.h"
11 #include "linux/random.h"
/*
 * Test setup/teardown helper: wipe all test keys from both the extents
 * and xattrs btrees, across all snapshots.
 * NOTE(review): this numbered dump is missing intervening lines (braces,
 * "int ret;", range arguments, BUG_ON checks); comments cover only what
 * is visible.
 */
13 static void delete_test_keys(struct bch_fs *c)
17 ret = bch2_btree_delete_range(c, BTREE_ID_extents,
19 BTREE_ITER_ALL_SNAPSHOTS,
23 ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
25 BTREE_ITER_ALL_SNAPSHOTS,
/*
 * Unit test: insert a single cookie key into the xattrs btree at snapshot
 * U32_MAX, then delete it twice through the same iterator — the second
 * delete exercises deleting at an already-deleted position.
 * NOTE(review): dump is missing lines (braces, "int ret;", error-check
 * conditionals before the bch_err() calls).
 */
32 static int test_delete(struct bch_fs *c, u64 nr)
34 struct btree_trans trans;
35 struct btree_iter iter;
36 struct bkey_i_cookie k;
39 bkey_cookie_init(&k.k_i);
40 k.k.p.snapshot = U32_MAX;
42 bch2_trans_init(&trans, c, 0, 0);
/* Iterator is positioned once at the key and reused for both deletes. */
43 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
/* Insert the key inside a transaction-restart loop. */
46 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
47 bch2_btree_iter_traverse(&iter) ?:
48 bch2_trans_update(&trans, &iter, &k.k_i, 0));
50 bch_err(c, "update error in test_delete: %i", ret);
54 pr_info("deleting once");
55 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
56 bch2_btree_iter_traverse(&iter) ?:
57 bch2_btree_delete_at(&trans, &iter, 0));
59 bch_err(c, "delete error (first) in test_delete: %i", ret);
/* Deleting the same position a second time must also succeed. */
63 pr_info("deleting twice");
64 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
65 bch2_btree_iter_traverse(&iter) ?:
66 bch2_btree_delete_at(&trans, &iter, 0));
68 bch_err(c, "delete error (second) in test_delete: %i", ret);
72 bch2_trans_iter_exit(&trans, &iter);
73 bch2_trans_exit(&trans);
/*
 * Unit test: insert a cookie key, flush all journal pins (forcing the
 * update to be written to the btree), then delete it — exercises deletion
 * of a key that has already been written out rather than one still
 * pending in the journal.
 * NOTE(review): dump is missing lines (braces, "int ret;", error checks).
 */
77 static int test_delete_written(struct bch_fs *c, u64 nr)
79 struct btree_trans trans;
80 struct btree_iter iter;
81 struct bkey_i_cookie k;
84 bkey_cookie_init(&k.k_i);
85 k.k.p.snapshot = U32_MAX;
87 bch2_trans_init(&trans, c, 0, 0);
89 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
92 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
93 bch2_btree_iter_traverse(&iter) ?:
94 bch2_trans_update(&trans, &iter, &k.k_i, 0));
96 bch_err(c, "update error in test_delete_written: %i", ret);
/* Unlock before flushing so the journal flush can take btree locks. */
100 bch2_trans_unlock(&trans);
101 bch2_journal_flush_all_pins(&c->journal);
103 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
104 bch2_btree_iter_traverse(&iter) ?:
105 bch2_btree_delete_at(&trans, &iter, 0));
107 bch_err(c, "delete error in test_delete_written: %i", ret);
111 bch2_trans_iter_exit(&trans, &iter);
112 bch2_trans_exit(&trans);
/*
 * Unit test: insert nr cookie keys (offsets presumably 0..nr-1 — the
 * offset assignment line is missing from this dump), then iterate
 * forwards verifying offsets ascend, and backwards verifying they
 * descend back to the start.
 */
116 static int test_iterate(struct bch_fs *c, u64 nr)
118 struct btree_trans trans;
119 struct btree_iter iter = { NULL };
124 bch2_trans_init(&trans, c, 0, 0);
128 pr_info("inserting test keys");
130 for (i = 0; i < nr; i++) {
131 struct bkey_i_cookie k;
133 bkey_cookie_init(&k.k_i);
135 k.k.p.snapshot = U32_MAX;
137 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
140 bch_err(c, "insert error in test_iterate: %i", ret);
145 pr_info("iterating forwards");
149 for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
150 SPOS(0, 0, U32_MAX), 0, k, ret) {
/* Keys must come back in insertion order with consecutive offsets. */
154 BUG_ON(k.k->p.offset != i++);
159 pr_info("iterating backwards");
/* Walk back from the end; i counts back down to the first offset. */
161 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
162 BUG_ON(k.k->p.offset != --i);
166 bch2_trans_iter_exit(&trans, &iter);
167 bch2_trans_exit(&trans);
/*
 * Unit test: insert contiguous 8-sector extents covering [i, i+8) for
 * i = 0, 8, 16, ..., then iterate forwards and backwards verifying the
 * extents tile the range with no gaps or overlaps.
 */
171 static int test_iterate_extents(struct bch_fs *c, u64 nr)
173 struct btree_trans trans;
174 struct btree_iter iter = { NULL };
179 bch2_trans_init(&trans, c, 0, 0);
183 pr_info("inserting test extents");
185 for (i = 0; i < nr; i += 8) {
186 struct bkey_i_cookie k;
188 bkey_cookie_init(&k.k_i);
/* p.offset is the extent's end; with size 8 it spans [i, i+8). */
189 k.k.p.offset = i + 8;
190 k.k.p.snapshot = U32_MAX;
193 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
196 bch_err(c, "insert error in test_iterate_extents: %i", ret);
201 pr_info("iterating forwards");
205 for_each_btree_key(&trans, iter, BTREE_ID_extents,
206 SPOS(0, 0, U32_MAX), 0, k, ret) {
/* Each extent must start exactly where the previous one ended. */
207 BUG_ON(bkey_start_offset(k.k) != i);
213 pr_info("iterating backwards");
215 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
/* Going backwards, each extent must end where the next one began. */
216 BUG_ON(k.k->p.offset != i);
217 i = bkey_start_offset(k.k);
222 bch2_trans_iter_exit(&trans, &iter);
223 bch2_trans_exit(&trans);
/*
 * Unit test: insert keys at even offsets only (i * 2), then iterate
 * normally (should see just the inserted keys) and with BTREE_ITER_SLOTS
 * (should see every slot, alternating whiteouts at odd offsets).
 */
227 static int test_iterate_slots(struct bch_fs *c, u64 nr)
229 struct btree_trans trans;
230 struct btree_iter iter = { NULL };
235 bch2_trans_init(&trans, c, 0, 0);
239 pr_info("inserting test keys");
241 for (i = 0; i < nr; i++) {
242 struct bkey_i_cookie k;
244 bkey_cookie_init(&k.k_i);
/* Keys land only on even offsets, leaving odd slots empty. */
245 k.k.p.offset = i * 2;
246 k.k.p.snapshot = U32_MAX;
248 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
251 bch_err(c, "insert error in test_iterate_slots: %i", ret);
256 pr_info("iterating forwards");
260 for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
261 SPOS(0, 0, U32_MAX), 0, k, ret) {
265 BUG_ON(k.k->p.offset != i);
268 bch2_trans_iter_exit(&trans, &iter);
272 pr_info("iterating forwards by slots");
276 for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
278 BTREE_ITER_SLOTS, k, ret) {
279 BUG_ON(k.k->p.offset != i);
/* Odd slots (i & 1) must be deleted/whiteout, even slots live keys. */
280 BUG_ON(bkey_deleted(k.k) != (i & 1));
286 bch2_trans_iter_exit(&trans, &iter);
288 bch2_trans_exit(&trans);
/*
 * Unit test: insert 8-sector extents ending at i+16 for i = 0, 16, 32...,
 * i.e. each extent covers [i+8, i+16) leaving an 8-sector hole before it.
 * Normal iteration sees only the real extents; BTREE_ITER_SLOTS iteration
 * must also report the holes as deleted 8-sector ranges.
 */
292 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
294 struct btree_trans trans;
295 struct btree_iter iter = { NULL };
300 bch2_trans_init(&trans, c, 0, 0);
304 pr_info("inserting test keys");
306 for (i = 0; i < nr; i += 16) {
307 struct bkey_i_cookie k;
309 bkey_cookie_init(&k.k_i);
/* End at i+16; with size 8 (set on a missing line) start is i+8. */
310 k.k.p.offset = i + 16;
311 k.k.p.snapshot = U32_MAX;
314 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
317 bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
322 pr_info("iterating forwards");
326 for_each_btree_key(&trans, iter, BTREE_ID_extents,
327 SPOS(0, 0, U32_MAX), 0, k, ret) {
328 BUG_ON(bkey_start_offset(k.k) != i + 8);
329 BUG_ON(k.k->size != 8);
332 bch2_trans_iter_exit(&trans, &iter);
336 pr_info("iterating forwards by slots");
340 for_each_btree_key(&trans, iter, BTREE_ID_extents,
342 BTREE_ITER_SLOTS, k, ret) {
/* Slots at multiples of 16 are the holes; they must read as deleted. */
343 BUG_ON(bkey_deleted(k.k) != !(i % 16));
345 BUG_ON(bkey_start_offset(k.k) != i);
346 BUG_ON(k.k->size != 8);
352 bch2_trans_iter_exit(&trans, &iter);
354 bch2_trans_exit(&trans);
359 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * Unit test: peek on an empty xattrs btree twice in a row — presumably
 * asserts no key is returned both times (the BUG_ON lines after each
 * peek are missing from this dump; confirm against the full file).
 */
362 static int test_peek_end(struct bch_fs *c, u64 nr)
364 struct btree_trans trans;
365 struct btree_iter iter;
368 bch2_trans_init(&trans, c, 0, 0);
369 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
370 SPOS(0, 0, U32_MAX), 0);
372 k = bch2_btree_iter_peek(&iter);
/* Peek again: repeating the peek at end must behave the same. */
375 k = bch2_btree_iter_peek(&iter);
378 bch2_trans_iter_exit(&trans, &iter);
379 bch2_trans_exit(&trans);
/*
 * Same as test_peek_end but for the extents btree: double peek on an
 * empty btree — presumably asserts no key both times (assertion lines
 * are missing from this dump; confirm against the full file).
 */
383 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
385 struct btree_trans trans;
386 struct btree_iter iter;
389 bch2_trans_init(&trans, c, 0, 0);
390 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
391 SPOS(0, 0, U32_MAX), 0);
393 k = bch2_btree_iter_peek(&iter);
396 k = bch2_btree_iter_peek(&iter);
399 bch2_trans_iter_exit(&trans, &iter);
400 bch2_trans_exit(&trans);
404 /* extent unit tests */
/*
 * Helper for the extent-overwrite tests: insert a cookie extent covering
 * [start, end) into the extents btree, with a monotonically increasing
 * version so later inserts overwrite earlier ones.
 * NOTE(review): the start/end parameters and the test_version static are
 * declared on lines missing from this dump.
 */
408 static int insert_test_extent(struct bch_fs *c,
411 struct bkey_i_cookie k;
414 bkey_cookie_init(&k.k_i);
415 k.k_i.k.p.offset = end;
416 k.k_i.k.p.snapshot = U32_MAX;
417 k.k_i.k.size = end - start;
/* Each insert gets a newer version than the last. */
418 k.k_i.k.version.lo = test_version++;
420 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
423 bch_err(c, "insert error in insert_test_extent: %i", ret);
/*
 * Insert extent e1 then an overlapping extent e2, exercising the extent
 * overwrite path; returns the first error from either insert.
 */
427 static int __test_extent_overwrite(struct bch_fs *c,
428 u64 e1_start, u64 e1_end,
429 u64 e2_start, u64 e2_end)
433 ret = insert_test_extent(c, e1_start, e1_end) ?:
434 insert_test_extent(c, e2_start, e2_end);
/* Overwrite the front of an existing extent (exact and partial front). */
440 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
442 return __test_extent_overwrite(c, 0, 64, 0, 32) ?:
443 __test_extent_overwrite(c, 8, 64, 0, 32);
/* Overwrite the back of an existing extent (flush with end, and past it). */
446 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
448 return __test_extent_overwrite(c, 0, 64, 32, 64) ?:
449 __test_extent_overwrite(c, 0, 64, 32, 72);
/* Overwrite the middle of an existing extent, splitting it in two. */
452 static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
454 return __test_extent_overwrite(c, 0, 64, 32, 40);
/* Completely overwrite an existing extent, with various alignments. */
457 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
459 return __test_extent_overwrite(c, 32, 64, 0, 64) ?:
460 __test_extent_overwrite(c, 32, 64, 0, 128) ?:
461 __test_extent_overwrite(c, 32, 64, 32, 64) ?:
462 __test_extent_overwrite(c, 32, 64, 32, 128);
465 /* snapshot unit tests */
467 /* Test skipping over keys in unrelated snapshots: */
/*
 * Insert a cookie in snapshot snapid_hi (not an ancestor of snapid_lo),
 * then iterate from snapid_lo: the iterator must skip the unrelated
 * snapshot's key and return only the key visible at U32_MAX.
 * NOTE(review): dump is missing lines (braces, "int ret;", a prior
 * insert at U32_MAX presumably set up by the caller).
 */
468 static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
470 struct btree_trans trans;
471 struct btree_iter iter;
473 struct bkey_i_cookie cookie;
476 bkey_cookie_init(&cookie.k_i);
477 cookie.k.p.snapshot = snapid_hi;
478 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
483 bch2_trans_init(&trans, c, 0, 0);
484 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
485 SPOS(0, 0, snapid_lo), 0);
486 k = bch2_btree_iter_peek(&iter);
/* The snapid_hi key must have been filtered out. */
488 BUG_ON(k.k->p.snapshot != U32_MAX);
490 bch2_trans_iter_exit(&trans, &iter);
491 bch2_trans_exit(&trans);
/*
 * Snapshot unit test driver: insert a key at U32_MAX, create two child
 * snapshot nodes of U32_MAX, then run test_snapshot_filter() with the
 * snapshot IDs ordered lo/hi.
 * NOTE(review): snapids[] declaration and error checks are on lines
 * missing from this dump.
 */
495 static int test_snapshots(struct bch_fs *c, u64 nr)
497 struct bkey_i_cookie cookie;
499 u32 snapid_subvols[2] = { 1, 1 };
502 bkey_cookie_init(&cookie.k_i);
503 cookie.k.p.snapshot = U32_MAX;
504 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i,
/* Create two new snapshot nodes under U32_MAX in one transaction. */
509 ret = bch2_trans_do(c, NULL, NULL, 0,
510 bch2_snapshot_node_create(&trans, U32_MAX,
/* Order the two new snapshot IDs so snapids[0] is the lower one. */
517 if (snapids[0] > snapids[1])
518 swap(snapids[0], snapids[1]);
520 ret = test_snapshot_filter(c, snapids[0], snapids[1]);
522 bch_err(c, "err %i from test_snapshot_filter", ret);
/* Return a pseudorandom u64 for randomized test key offsets. */
531 static u64 test_rand(void)
537 prandom_bytes(&v, sizeof(v));
/*
 * Perf test: insert nr cookie keys at random offsets into the xattrs
 * btree, one key per transaction.
 * NOTE(review): braces, "int ret;" and error-check conditionals are on
 * lines missing from this dump.
 */
542 static int rand_insert(struct bch_fs *c, u64 nr)
544 struct btree_trans trans;
545 struct bkey_i_cookie k;
549 bch2_trans_init(&trans, c, 0, 0);
551 for (i = 0; i < nr; i++) {
552 bkey_cookie_init(&k.k_i);
553 k.k.p.offset = test_rand();
554 k.k.p.snapshot = U32_MAX;
556 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
557 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
559 bch_err(c, "error in rand_insert: %i", ret);
564 bch2_trans_exit(&trans);
/*
 * Perf test: like rand_insert, but batches 8 random-offset inserts into
 * a single transaction commit — exercises multi-update transactions.
 */
568 static int rand_insert_multi(struct bch_fs *c, u64 nr)
570 struct btree_trans trans;
571 struct bkey_i_cookie k[8];
576 bch2_trans_init(&trans, c, 0, 0);
578 for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
579 for (j = 0; j < ARRAY_SIZE(k); j++) {
580 bkey_cookie_init(&k[j].k_i);
581 k[j].k.p.offset = test_rand();
582 k[j].k.p.snapshot = U32_MAX;
/* All 8 inserts chained with ?:, committed atomically. */
585 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
586 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
587 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
588 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
589 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
590 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
591 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
592 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
593 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
595 bch_err(c, "error in rand_insert_multi: %i", ret);
600 bch2_trans_exit(&trans);
/*
 * Perf test: nr random point lookups — reposition one iterator to a
 * random offset and peek, reusing the same iterator throughout.
 */
604 static int rand_lookup(struct bch_fs *c, u64 nr)
606 struct btree_trans trans;
607 struct btree_iter iter;
612 bch2_trans_init(&trans, c, 0, 0);
613 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
614 SPOS(0, 0, U32_MAX), 0);
616 for (i = 0; i < nr; i++) {
617 bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
619 k = bch2_btree_iter_peek(&iter);
622 bch_err(c, "error in rand_lookup: %i", ret);
627 bch2_trans_iter_exit(&trans, &iter);
628 bch2_trans_exit(&trans);
/*
 * One iteration of the rand_mixed workload, run inside a transaction
 * restart loop: seek to a random position, peek, and on every 4th
 * iteration (i & 3 == 0) overwrite the key found there with a cookie.
 * NOTE(review): the i and pos parameters are declared on lines missing
 * from this dump.
 */
632 static int rand_mixed_trans(struct btree_trans *trans,
633 struct btree_iter *iter,
634 struct bkey_i_cookie *cookie,
640 bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
642 k = bch2_btree_iter_peek(iter);
/* -EINTR is a normal transaction restart, not worth logging. */
644 if (ret && ret != -EINTR)
645 bch_err(trans->c, "lookup error in rand_mixed: %i", ret);
649 if (!(i & 3) && k.k) {
650 bkey_cookie_init(&cookie->k_i);
651 cookie->k.p = iter->pos;
652 ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
/*
 * Perf test: mixed random lookup/update workload — runs rand_mixed_trans
 * nr times (roughly 3 lookups per update).
 */
658 static int rand_mixed(struct bch_fs *c, u64 nr)
660 struct btree_trans trans;
661 struct btree_iter iter;
662 struct bkey_i_cookie cookie;
666 bch2_trans_init(&trans, c, 0, 0);
667 bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
668 SPOS(0, 0, U32_MAX), 0);
670 for (i = 0; i < nr; i++) {
/* rand is presumably drawn once per iteration on a missing line. */
672 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
673 rand_mixed_trans(&trans, &iter, &cookie, i, rand));
675 bch_err(c, "update error in rand_mixed: %i", ret);
680 bch2_trans_iter_exit(&trans, &iter);
681 bch2_trans_exit(&trans);
/*
 * Transactional helper for rand_delete: peek at pos and delete the key
 * found there (if any). Runs inside __bch2_trans_do's restart loop.
 */
685 static int __do_delete(struct btree_trans *trans, struct bpos pos)
687 struct btree_iter iter;
691 bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
693 k = bch2_btree_iter_peek(&iter);
701 ret = bch2_btree_delete_at(trans, &iter, 0);
703 bch2_trans_iter_exit(trans, &iter);
/*
 * Perf test: nr deletes at random positions, one transaction each.
 */
707 static int rand_delete(struct bch_fs *c, u64 nr)
709 struct btree_trans trans;
713 bch2_trans_init(&trans, c, 0, 0);
715 for (i = 0; i < nr; i++) {
716 struct bpos pos = SPOS(0, test_rand(), U32_MAX);
718 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
719 __do_delete(&trans, pos));
721 bch_err(c, "error in rand_delete: %i", ret);
726 bch2_trans_exit(&trans);
/*
 * Perf test: sequential inserts — walk every slot in order with an
 * intent iterator and insert a cookie at each position. The loop's
 * termination condition (after nr inserts) is on a line missing from
 * this dump.
 */
730 static int seq_insert(struct bch_fs *c, u64 nr)
732 struct btree_trans trans;
733 struct btree_iter iter;
735 struct bkey_i_cookie insert;
739 bkey_cookie_init(&insert.k_i);
741 bch2_trans_init(&trans, c, 0, 0);
743 for_each_btree_key(&trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX),
744 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
/* Insert at the iterator's current slot position. */
745 insert.k.p = iter.pos;
747 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
748 bch2_btree_iter_traverse(&iter) ?:
749 bch2_trans_update(&trans, &iter, &insert.k_i, 0));
751 bch_err(c, "error in seq_insert: %i", ret);
758 bch2_trans_iter_exit(&trans, &iter);
760 bch2_trans_exit(&trans);
/*
 * Perf test: sequential scan — iterate every key in the xattrs btree,
 * doing no work per key (measures raw iteration cost).
 */
764 static int seq_lookup(struct bch_fs *c, u64 nr)
766 struct btree_trans trans;
767 struct btree_iter iter;
771 bch2_trans_init(&trans, c, 0, 0);
773 for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
774 SPOS(0, 0, U32_MAX), 0, k, ret)
776 bch2_trans_iter_exit(&trans, &iter);
778 bch2_trans_exit(&trans);
/*
 * Perf test: sequential overwrite — iterate every key with an intent
 * iterator, copy it out, and write the copy back in place.
 */
782 static int seq_overwrite(struct bch_fs *c, u64 nr)
784 struct btree_trans trans;
785 struct btree_iter iter;
789 bch2_trans_init(&trans, c, 0, 0);
791 for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
793 BTREE_ITER_INTENT, k, ret) {
794 struct bkey_i_cookie u;
/* Copy the existing key so it can be reinserted unchanged. */
796 bkey_reassemble(&u.k_i, k);
798 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
799 bch2_btree_iter_traverse(&iter) ?:
800 bch2_trans_update(&trans, &iter, &u.k_i, 0));
802 bch_err(c, "error in seq_overwrite: %i", ret);
806 bch2_trans_iter_exit(&trans, &iter);
808 bch2_trans_exit(&trans);
/*
 * Perf test: bulk delete — delete the whole xattrs test range in one
 * bch2_btree_delete_range() call, across all snapshots.
 */
812 static int seq_delete(struct bch_fs *c, u64 nr)
816 ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
818 BTREE_ITER_ALL_SNAPSHOTS,
821 bch_err(c, "error in seq_delete: %i", ret);
/* Signature shared by all unit/perf tests: (fs, iteration count) -> errno. */
825 typedef int (*perf_test_fn)(struct bch_fs *, u64);
/*
 * Fields of struct test_job (the struct header and remaining members are
 * on lines missing from this dump): worker threads block on ready_wait
 * until all are ready, and the last finisher signals done_completion.
 */
834 wait_queue_head_t ready_wait;
837 struct completion done_completion;
/*
 * Worker thread body for bch2_btree_perf_test(): barrier on j->ready so
 * all threads start together (the last arriver records the start time),
 * run the test fn with nr split evenly across threads, then the last
 * thread to finish records the end time and completes the job.
 */
844 static int btree_perf_test_thread(void *data)
846 struct test_job *j = data;
/* Last thread to arrive releases the barrier and stamps start time. */
849 if (atomic_dec_and_test(&j->ready)) {
850 wake_up(&j->ready_wait);
851 j->start = sched_clock();
853 wait_event(j->ready_wait, !atomic_read(&j->ready));
/* Each thread does an equal share of the nr iterations. */
856 ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
858 bch_err(j->c, "%ps: error %i", j->fn, ret);
/* Last thread to finish stamps end time and wakes the waiter. */
862 if (atomic_dec_and_test(&j->done)) {
863 j->finish = sched_clock();
864 complete(&j->done_completion);
/*
 * Entry point (from sysfs): look up @testname among the registered
 * unit/perf tests, run it on @nr_threads kthreads with @nr total
 * iterations, and print elapsed time, per-iteration cost and throughput.
 * NOTE(review): dump is missing lines (braces, locals such as name_buf/
 * time/i, the "found test" check, return statements).
 */
870 int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
871 u64 nr, unsigned nr_threads)
873 struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
875 struct printbuf nr_buf = PRINTBUF;
876 struct printbuf per_sec_buf = PRINTBUF;
/* ready/done both count down from nr_threads (see worker barrier). */
880 atomic_set(&j.ready, nr_threads);
881 init_waitqueue_head(&j.ready_wait);
883 atomic_set(&j.done, nr_threads);
884 init_completion(&j.done_completion);
/* Map the test's C identifier to its name string via stringification. */
886 #define perf_test(_test) \
887 if (!strcmp(testname, #_test)) j.fn = _test
889 perf_test(rand_insert);
890 perf_test(rand_insert_multi);
891 perf_test(rand_lookup);
892 perf_test(rand_mixed);
893 perf_test(rand_delete);
895 perf_test(seq_insert);
896 perf_test(seq_lookup);
897 perf_test(seq_overwrite);
898 perf_test(seq_delete);
900 /* a unit test, not a perf test: */
901 perf_test(test_delete);
902 perf_test(test_delete_written);
903 perf_test(test_iterate);
904 perf_test(test_iterate_extents);
905 perf_test(test_iterate_slots);
906 perf_test(test_iterate_slots_extents);
907 perf_test(test_peek_end);
908 perf_test(test_peek_end_extents);
910 perf_test(test_extent_overwrite_front);
911 perf_test(test_extent_overwrite_back);
912 perf_test(test_extent_overwrite_middle);
913 perf_test(test_extent_overwrite_all);
915 perf_test(test_snapshots);
918 pr_err("unknown test %s", testname);
922 //pr_info("running test %s:", testname);
/* The calling thread runs as one of the workers... */
925 btree_perf_test_thread(&j);
/* ...plus extra kthreads (loop bound relative to nr_threads is on a
 * missing line). */
927 for (i = 0; i < nr_threads; i++)
928 kthread_run(btree_perf_test_thread, &j,
929 "bcachefs perf test[%u]", i);
/* Retry the interruptible wait until all workers complete. */
931 while (wait_for_completion_interruptible(&j.done_completion))
934 time = j.finish - j.start;
936 scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
937 bch2_hprint(&nr_buf, nr);
/* Throughput: iterations per second, human-readable. */
938 bch2_hprint(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
939 printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
940 name_buf, nr_buf.buf, nr_threads,
941 div_u64(time, NSEC_PER_SEC),
942 div_u64(time * nr_threads, nr),
944 printbuf_exit(&per_sec_buf);
945 printbuf_exit(&nr_buf);
949 #endif /* CONFIG_BCACHEFS_TESTS */