1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
10 #include "linux/kthread.h"
11 #include "linux/random.h"
/*
 * Reset helper: delete every key in the extents and xattrs btrees so each
 * test starts from an empty tree.  (Error handling lines elided in this
 * excerpt.)
 */
13 static void delete_test_keys(struct bch_fs *c)
17 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
23 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
/*
 * Unit test: insert a single cookie key in the xattrs btree, then delete at
 * the same iterator position twice — the second delete exercises deleting
 * where no key remains.  @nr is unused here (uniform test signature).
 * Returns 0 on success; interior error-check lines are elided in this excerpt.
 */
32 static int test_delete(struct bch_fs *c, u64 nr)
34 	struct btree_trans *trans = bch2_trans_get(c);
35 	struct btree_iter iter;
36 	struct bkey_i_cookie k;
39 	bkey_cookie_init(&k.k_i);
40 	k.k.p.snapshot = U32_MAX;
	/* iterator pinned at the key's position for all three commits below */
42 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
45 	ret = commit_do(trans, NULL, NULL, 0,
46 		bch2_btree_iter_traverse(&iter) ?:
47 		bch2_trans_update(trans, &iter, &k.k_i, 0));
48 	bch_err_msg(c, ret, "update error");
52 	pr_info("deleting once");
53 	ret = commit_do(trans, NULL, NULL, 0,
54 		bch2_btree_iter_traverse(&iter) ?:
55 		bch2_btree_delete_at(trans, &iter, 0));
56 	bch_err_msg(c, ret, "delete error (first)");
60 	pr_info("deleting twice");
61 	ret = commit_do(trans, NULL, NULL, 0,
62 		bch2_btree_iter_traverse(&iter) ?:
63 		bch2_btree_delete_at(trans, &iter, 0));
64 	bch_err_msg(c, ret, "delete error (second)");
68 	bch2_trans_iter_exit(trans, &iter);
69 	bch2_trans_put(trans);
/*
 * Like test_delete(), but flushes all journal pins between the insert and
 * the delete — forcing the update to be written out — so the delete path is
 * exercised against an already-written key rather than a journalled one.
 * Interior error-check lines are elided in this excerpt.
 */
73 static int test_delete_written(struct bch_fs *c, u64 nr)
75 	struct btree_trans *trans = bch2_trans_get(c);
76 	struct btree_iter iter;
77 	struct bkey_i_cookie k;
80 	bkey_cookie_init(&k.k_i);
81 	k.k.p.snapshot = U32_MAX;
83 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
86 	ret = commit_do(trans, NULL, NULL, 0,
87 		bch2_btree_iter_traverse(&iter) ?:
88 		bch2_trans_update(trans, &iter, &k.k_i, 0));
89 	bch_err_msg(c, ret, "update error");
	/* must unlock before flushing: journal flush may need btree locks */
93 	bch2_trans_unlock(trans);
94 	bch2_journal_flush_all_pins(&c->journal);
96 	ret = commit_do(trans, NULL, NULL, 0,
97 		bch2_btree_iter_traverse(&iter) ?:
98 		bch2_btree_delete_at(trans, &iter, 0));
99 	bch_err_msg(c, ret, "delete error");
103 	bch2_trans_iter_exit(trans, &iter);
104 	bch2_trans_put(trans);
/*
 * Insert @nr cookie keys at consecutive offsets in the xattrs btree, then
 * iterate forwards and backwards, BUG_ON'ing if any offset is skipped or
 * returned out of order.  Interior lines (declarations of i/ret/k, loop
 * bodies' closing braces, final checks) are elided in this excerpt.
 */
108 static int test_iterate(struct bch_fs *c, u64 nr)
110 	struct btree_trans *trans = bch2_trans_get(c);
111 	struct btree_iter iter = { NULL };
118 	pr_info("inserting test keys");
120 	for (i = 0; i < nr; i++) {
121 		struct bkey_i_cookie ck;
123 		bkey_cookie_init(&ck.k_i);
125 		ck.k.p.snapshot = U32_MAX;
127 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
128 		bch_err_msg(c, ret, "insert error");
133 	pr_info("iterating forwards");
137 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
138 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
	/* forward scan must see offsets 0, 1, 2, ... in order */
140 		BUG_ON(k.k->p.offset != i++);
143 	bch_err_msg(c, ret, "error iterating forwards");
149 	pr_info("iterating backwards");
151 	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
152 				  SPOS(0, U64_MAX, U32_MAX), 0, k,
	/* reverse scan must see the same offsets counting back down */
154 		BUG_ON(k.k->p.offset != --i);
157 	bch_err_msg(c, ret, "error iterating backwards");
163 	bch2_trans_iter_exit(trans, &iter);
164 	bch2_trans_put(trans);
/*
 * Extents variant of test_iterate(): insert contiguous 8-sector extents
 * (end positions 8, 16, ...) covering [0, nr), then verify forward iteration
 * sees each extent's start offset in order and reverse iteration walks the
 * ends back down.  Interior lines are elided in this excerpt.
 */
168 static int test_iterate_extents(struct bch_fs *c, u64 nr)
170 	struct btree_trans *trans = bch2_trans_get(c);
171 	struct btree_iter iter = { NULL };
178 	pr_info("inserting test extents");
180 	for (i = 0; i < nr; i += 8) {
181 		struct bkey_i_cookie ck;
183 		bkey_cookie_init(&ck.k_i);
	/* extent keys are addressed by their end position */
184 		ck.k.p.offset = i + 8;
185 		ck.k.p.snapshot = U32_MAX;
188 		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
189 		bch_err_msg(c, ret, "insert error");
194 	pr_info("iterating forwards");
198 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
199 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
200 		BUG_ON(bkey_start_offset(k.k) != i);
205 	bch_err_msg(c, ret, "error iterating forwards");
211 	pr_info("iterating backwards");
213 	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
214 				  SPOS(0, U64_MAX, U32_MAX), 0, k,
216 		BUG_ON(k.k->p.offset != i);
217 		i = bkey_start_offset(k.k);
220 	bch_err_msg(c, ret, "error iterating backwards");
226 	bch2_trans_iter_exit(trans, &iter);
227 	bch2_trans_put(trans);
/*
 * Insert cookie keys at even offsets only (i * 2), then iterate first by
 * existing keys and then with BTREE_ITER_SLOTS — the slots pass must see
 * every offset, with odd slots reported as deleted (whiteout) keys.
 * Interior lines are elided in this excerpt.
 */
231 static int test_iterate_slots(struct bch_fs *c, u64 nr)
233 	struct btree_trans *trans = bch2_trans_get(c);
234 	struct btree_iter iter = { NULL };
241 	pr_info("inserting test keys");
243 	for (i = 0; i < nr; i++) {
244 		struct bkey_i_cookie ck;
246 		bkey_cookie_init(&ck.k_i);
247 		ck.k.p.offset = i * 2;
248 		ck.k.p.snapshot = U32_MAX;
250 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
251 		bch_err_msg(c, ret, "insert error");
256 	pr_info("iterating forwards");
260 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
261 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
263 		BUG_ON(k.k->p.offset != i);
267 	bch_err_msg(c, ret, "error iterating forwards");
273 	pr_info("iterating forwards by slots");
277 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
278 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
279 				  BTREE_ITER_SLOTS, k, ({
283 		BUG_ON(k.k->p.offset != i);
	/* odd slots hold no key, so they must read back as deleted */
284 		BUG_ON(bkey_deleted(k.k) != (i & 1));
290 	bch_err_msg(c, ret, "error iterating forwards by slots");
295 	bch2_trans_put(trans);
/*
 * Extents variant of test_iterate_slots(): insert 8-sector extents at every
 * other 8-sector slot (ends at 16, 32, ...), then verify both normal
 * iteration and slots iteration — the latter must alternate between holes
 * (deleted) and the inserted extents.  Interior lines are elided in this
 * excerpt.
 */
299 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
301 	struct btree_trans *trans = bch2_trans_get(c);
302 	struct btree_iter iter = { NULL };
309 	pr_info("inserting test keys");
311 	for (i = 0; i < nr; i += 16) {
312 		struct bkey_i_cookie ck;
314 		bkey_cookie_init(&ck.k_i);
315 		ck.k.p.offset = i + 16;
316 		ck.k.p.snapshot = U32_MAX;
319 		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
320 		bch_err_msg(c, ret, "insert error");
325 	pr_info("iterating forwards");
329 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
330 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
332 		BUG_ON(bkey_start_offset(k.k) != i + 8);
333 		BUG_ON(k.k->size != 8);
337 	bch_err_msg(c, ret, "error iterating forwards");
343 	pr_info("iterating forwards by slots");
347 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
348 				 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
349 				 BTREE_ITER_SLOTS, k, ({
	/* first 8 sectors of each 16-sector stride are a hole */
352 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
354 		BUG_ON(bkey_start_offset(k.k) != i);
355 		BUG_ON(k.k->size != 8);
359 	bch_err_msg(c, ret, "error iterating forwards by slots");
364 	bch2_trans_put(trans);
369 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * Peek past the end of an empty xattrs btree twice in a row; the elided
 * lines presumably assert no key is returned — TODO confirm against the
 * full source.  @nr is unused.
 */
372 static int test_peek_end(struct bch_fs *c, u64 nr)
374 	struct btree_trans *trans = bch2_trans_get(c);
375 	struct btree_iter iter;
378 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
379 			     SPOS(0, 0, U32_MAX), 0);
381 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	/* peeking again at end-of-btree must behave the same way */
384 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
387 	bch2_trans_iter_exit(trans, &iter);
388 	bch2_trans_put(trans);
/*
 * Same as test_peek_end() but against the extents btree, whose peek path
 * differs (extent iteration).  @nr is unused; elided lines presumably
 * assert no key is returned — TODO confirm against the full source.
 */
392 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
394 	struct btree_trans *trans = bch2_trans_get(c);
395 	struct btree_iter iter;
398 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
399 			     SPOS(0, 0, U32_MAX), 0);
401 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
404 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
407 	bch2_trans_iter_exit(trans, &iter);
408 	bch2_trans_put(trans);
412 /* extent unit tests */
/* monotonically increasing version stamp for inserted test extents */
414 static u64 test_version;
/*
 * Insert one extent spanning [start, end) into the extents btree, stamping
 * it with the next test_version.  (Parameter lines for start/end are elided
 * in this excerpt.)
 */
416 static int insert_test_extent(struct bch_fs *c,
419 	struct bkey_i_cookie k;
422 	bkey_cookie_init(&k.k_i);
423 	k.k_i.k.p.offset = end;
424 	k.k_i.k.p.snapshot = U32_MAX;
425 	k.k_i.k.size = end - start;
426 	k.k_i.k.version.lo = test_version++;
428 	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0);
/*
 * Insert extent e1, then overlapping extent e2, exercising the extent
 * overwrite/trim path; the relative ranges determine which overwrite case
 * (front, back, middle, all) is hit.  Verification lines are elided in
 * this excerpt.
 */
433 static int __test_extent_overwrite(struct bch_fs *c,
434 				    u64 e1_start, u64 e1_end,
435 				    u64 e2_start, u64 e2_end)
439 	ret = insert_test_extent(c, e1_start, e1_end) ?:
440 		insert_test_extent(c, e2_start, e2_end);
/* Overwrite the front of an existing extent (second insert overlaps its start). */
446 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
448 	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
449 		__test_extent_overwrite(c, 8, 64, 0, 32);
/* Overwrite the back of an existing extent, including one past-the-end case. */
452 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
454 	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
455 		__test_extent_overwrite(c, 0, 64, 32, 72);
/* Overwrite the middle of an existing extent, splitting it in two. */
458 static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
460 	return __test_extent_overwrite(c, 0, 64, 32, 40);
/* Completely overwrite an existing extent, in the four start/end alignment combinations. */
463 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
465 	return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
466 		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
467 		__test_extent_overwrite(c, 32, 64, 32,  64) ?:
468 		__test_extent_overwrite(c, 32, 64, 32, 128);
/*
 * Insert an extent [start, start+len) for @inum in snapshot @snapid using
 * the nonextent insert path with BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE —
 * i.e. deliberately creating overlap the normal extent path would resolve,
 * so repair/check code can be exercised.  Size-setting lines are elided in
 * this excerpt.
 */
471 static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
473 	struct bkey_i_cookie k;
476 	bkey_cookie_init(&k.k_i);
477 	k.k_i.k.p.inode	= inum;
478 	k.k_i.k.p.offset = start + len;
479 	k.k_i.k.p.snapshot = snapid;
482 	ret = bch2_trans_do(c, NULL, NULL, 0,
483 		bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
484 					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
/*
 * Build a set of overlapping extents across two snapshots (U32_MAX and
 * U32_MAX - 2): full overwrites, then front/back overlaps — test fixture
 * for overlapping-extent detection/repair.
 */
489 static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
491 	return  insert_test_overlapping_extent(c, inum,  0, 16, U32_MAX - 2) ?: /* overwrite entire */
492 		insert_test_overlapping_extent(c, inum,  2,  8, U32_MAX - 2) ?:
493 		insert_test_overlapping_extent(c, inum,  4,  4, U32_MAX) ?:
494 		insert_test_overlapping_extent(c, inum, 32,  8, U32_MAX - 2) ?: /* overwrite front/back */
495 		insert_test_overlapping_extent(c, inum, 36,  8, U32_MAX) ?:
496 		insert_test_overlapping_extent(c, inum, 60,  8, U32_MAX - 2) ?:
497 		insert_test_overlapping_extent(c, inum, 64,  8, U32_MAX);
500 /* snapshot unit tests */
502 /* Test skipping over keys in unrelated snapshots: */
/*
 * Insert a key in snapshot @snapid_hi, then peek from the unrelated
 * snapshot @snapid_lo: the iterator must skip the snapid_hi key and return
 * the key in the common ancestor snapshot (U32_MAX, inserted by the
 * caller).  Interior error-check lines are elided in this excerpt.
 */
503 static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
505 	struct btree_trans *trans;
506 	struct btree_iter iter;
508 	struct bkey_i_cookie cookie;
511 	bkey_cookie_init(&cookie.k_i);
512 	cookie.k.p.snapshot = snapid_hi;
513 	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
517 	trans = bch2_trans_get(c);
518 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
519 			     SPOS(0, 0, snapid_lo), 0);
520 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	/* the snapid_hi key must have been filtered out */
522 	BUG_ON(k.k->p.snapshot != U32_MAX);
524 	bch2_trans_iter_exit(trans, &iter);
525 	bch2_trans_put(trans);
/*
 * Snapshot unit test: insert a key in snapshot U32_MAX, create two sibling
 * child snapshots of it, then run test_snapshot_filter() with the lower
 * snapid as reader and the higher as writer.  Declarations of snapids and
 * error checks are elided in this excerpt.
 */
529 static int test_snapshots(struct bch_fs *c, u64 nr)
531 	struct bkey_i_cookie cookie;
533 	u32 snapid_subvols[2] = { 1, 1 };
536 	bkey_cookie_init(&cookie.k_i);
537 	cookie.k.p.snapshot = U32_MAX;
538 	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
542 	ret = bch2_trans_do(c, NULL, NULL, 0,
543 		      bch2_snapshot_node_create(trans, U32_MAX,
	/* order the pair so snapids[0] is always the lower id */
550 	if (snapids[0] > snapids[1])
551 		swap(snapids[0], snapids[1]);
553 	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
554 	bch_err_msg(c, ret, "from test_snapshot_filter");
/*
 * Random u64 for key offsets in the rand_* tests, from the kernel RNG.
 * (Declaration of v and the return expression are elided in this excerpt —
 * the value is presumably scaled/reduced before return; TODO confirm.)
 */
560 static u64 test_rand(void)
564 	get_random_bytes(&v, sizeof(v));
/*
 * Perf test: @nr single-key inserts at random offsets, one transaction
 * commit each.  Error-handling lines are elided in this excerpt.
 */
568 static int rand_insert(struct bch_fs *c, u64 nr)
570 	struct btree_trans *trans = bch2_trans_get(c);
571 	struct bkey_i_cookie k;
575 	for (i = 0; i < nr; i++) {
576 		bkey_cookie_init(&k.k_i);
577 		k.k.p.offset = test_rand();
578 		k.k.p.snapshot = U32_MAX;
580 		ret = commit_do(trans, NULL, NULL, 0,
581 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
586 	bch2_trans_put(trans);
/*
 * Perf test: like rand_insert(), but batches 8 random-offset inserts into
 * a single transaction commit to measure multi-update commits.
 * Error-handling lines are elided in this excerpt.
 */
590 static int rand_insert_multi(struct bch_fs *c, u64 nr)
592 	struct btree_trans *trans = bch2_trans_get(c);
593 	struct bkey_i_cookie k[8];
598 	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
599 		for (j = 0; j < ARRAY_SIZE(k); j++) {
600 			bkey_cookie_init(&k[j].k_i);
601 			k[j].k.p.offset = test_rand();
602 			k[j].k.p.snapshot = U32_MAX;
	/* all eight updates committed atomically in one transaction */
605 		ret = commit_do(trans, NULL, NULL, 0,
606 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
607 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
608 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
609 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
610 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
611 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
612 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
613 			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
618 	bch2_trans_put(trans);
/*
 * Perf test: @nr point lookups at random offsets through one reused
 * iterator.  Error-handling lines are elided in this excerpt.
 */
622 static int rand_lookup(struct bch_fs *c, u64 nr)
624 	struct btree_trans *trans = bch2_trans_get(c);
625 	struct btree_iter iter;
630 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
631 			     SPOS(0, 0, U32_MAX), 0);
633 	for (i = 0; i < nr; i++) {
634 		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
636 		lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
642 	bch2_trans_iter_exit(trans, &iter);
643 	bch2_trans_put(trans);
/*
 * One iteration of the mixed workload, run inside a transaction: peek at
 * @pos, and on every 4th call (when a key was found) overwrite it with a
 * fresh cookie.  Parameter lines for i/pos are elided in this excerpt.
 */
647 static int rand_mixed_trans(struct btree_trans *trans,
648 			    struct btree_iter *iter,
649 			    struct bkey_i_cookie *cookie,
655 	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
657 	k = bch2_btree_iter_peek(iter);
659 	bch_err_msg(trans->c, ret, "lookup error");
	/* 1-in-4 iterations do an update at the position just looked up */
663 	if (!(i & 3) && k.k) {
664 		bkey_cookie_init(&cookie->k_i);
665 		cookie->k.p = iter->pos;
666 		ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
/*
 * Perf test: @nr iterations of rand_mixed_trans() (mostly lookups, some
 * overwrites), each in its own commit.  The line computing the per-
 * iteration random position is elided in this excerpt.
 */
672 static int rand_mixed(struct bch_fs *c, u64 nr)
674 	struct btree_trans *trans = bch2_trans_get(c);
675 	struct btree_iter iter;
676 	struct bkey_i_cookie cookie;
680 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
681 			     SPOS(0, 0, U32_MAX), 0);
683 	for (i = 0; i < nr; i++) {
685 		ret = commit_do(trans, NULL, NULL, 0,
686 			rand_mixed_trans(trans, &iter, &cookie, i, rand));
691 	bch2_trans_iter_exit(trans, &iter);
692 	bch2_trans_put(trans);
/*
 * Transaction helper for rand_delete(): peek for the first key at/after
 * @pos and delete it.  Error/empty-key checks are elided in this excerpt.
 */
696 static int __do_delete(struct btree_trans *trans, struct bpos pos)
698 	struct btree_iter iter;
702 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
704 	k = bch2_btree_iter_peek(&iter);
712 	ret = bch2_btree_delete_at(trans, &iter, 0);
714 	bch2_trans_iter_exit(trans, &iter);
/*
 * Perf test: @nr deletes at random positions, each in its own commit via
 * __do_delete().  Error-handling lines are elided in this excerpt.
 */
718 static int rand_delete(struct bch_fs *c, u64 nr)
720 	struct btree_trans *trans = bch2_trans_get(c);
724 	for (i = 0; i < nr; i++) {
725 		struct bpos pos = SPOS(0, test_rand(), U32_MAX);
727 		ret = commit_do(trans, NULL, NULL, 0,
728 			__do_delete(trans, pos));
733 	bch2_trans_put(trans);
/*
 * Perf test: sequential inserts — walk slots from the start of the xattrs
 * btree, writing a cookie into each slot until @nr keys have been inserted
 * (the break on reaching nr is implied by the elided lines).
 */
737 static int seq_insert(struct bch_fs *c, u64 nr)
739 	struct btree_iter iter;
741 	struct bkey_i_cookie insert;
743 	bkey_cookie_init(&insert.k_i);
745 	return bch2_trans_run(c,
746 		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
748 					  BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
750 			if (iter.pos.offset >= nr)
752 			insert.k.p = iter.pos;
753 			bch2_trans_update(trans, &iter, &insert.k_i, 0);
/*
 * Perf test: sequential lookups — iterate every key in the xattrs btree
 * once; the loop body (elided here) does no work per key.
 */
757 static int seq_lookup(struct bch_fs *c, u64 nr)
759 	struct btree_iter iter;
762 	return bch2_trans_run(c,
763 		for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
764 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
/*
 * Perf test: sequential overwrites — walk every existing key and rewrite
 * it with an identical copy (reassemble + update), one commit per key.
 */
769 static int seq_overwrite(struct bch_fs *c, u64 nr)
771 	struct btree_iter iter;
774 	return bch2_trans_run(c,
775 		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
777 					  BTREE_ITER_INTENT, k,
779 			struct bkey_i_cookie u;
781 			bkey_reassemble(&u.k_i, k);
782 			bch2_trans_update(trans, &iter, &u.k_i, 0);
/* Perf test: delete the whole xattrs btree with a single range delete. */
786 static int seq_delete(struct bch_fs *c, u64 nr)
788 	return bch2_btree_delete_range(c, BTREE_ID_xattrs,
794 typedef int (*perf_test_fn)(struct bch_fs *, u64);
803 wait_queue_head_t ready_wait;
806 struct completion done_completion;
/*
 * Per-thread body for bch2_btree_perf_test().  The last thread to become
 * ready records the start timestamp and wakes the others; every thread
 * runs its share (nr / nr_threads) of the work; the last thread to finish
 * records the end timestamp and signals completion.  Return-value lines
 * are elided in this excerpt.
 */
813 static int btree_perf_test_thread(void *data)
815 	struct test_job *j = data;
818 	if (atomic_dec_and_test(&j->ready)) {
819 		wake_up(&j->ready_wait);
820 		j->start = sched_clock();
	/* all other threads park here until the last one arrives */
822 		wait_event(j->ready_wait, !atomic_read(&j->ready));
825 	ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
827 		bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
831 	if (atomic_dec_and_test(&j->done)) {
832 		j->finish = sched_clock();
833 		complete(&j->done_completion);
/*
 * Entry point (from sysfs) for running a named unit/perf test: resolve
 * @testname to a test function via the perf_test() table, run it on
 * @nr_threads kthreads over @nr iterations total, then print the elapsed
 * time, per-iteration cost and throughput.  Several declarations and
 * error-path lines are elided in this excerpt.
 */
839 int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
840 			 u64 nr, unsigned nr_threads)
842 	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
844 	struct printbuf nr_buf = PRINTBUF;
845 	struct printbuf per_sec_buf = PRINTBUF;
849 	atomic_set(&j.ready, nr_threads);
850 	init_waitqueue_head(&j.ready_wait);
852 	atomic_set(&j.done, nr_threads);
853 	init_completion(&j.done_completion);
/* map a test name string to its function; expands to an if-strcmp chain */
855 #define perf_test(_test)				\
856 	if (!strcmp(testname, #_test)) j.fn = _test
858 	perf_test(rand_insert);
859 	perf_test(rand_insert_multi);
860 	perf_test(rand_lookup);
861 	perf_test(rand_mixed);
862 	perf_test(rand_delete);
864 	perf_test(seq_insert);
865 	perf_test(seq_lookup);
866 	perf_test(seq_overwrite);
867 	perf_test(seq_delete);
869 	/* a unit test, not a perf test: */
870 	perf_test(test_delete);
871 	perf_test(test_delete_written);
872 	perf_test(test_iterate);
873 	perf_test(test_iterate_extents);
874 	perf_test(test_iterate_slots);
875 	perf_test(test_iterate_slots_extents);
876 	perf_test(test_peek_end);
877 	perf_test(test_peek_end_extents);
879 	perf_test(test_extent_overwrite_front);
880 	perf_test(test_extent_overwrite_back);
881 	perf_test(test_extent_overwrite_middle);
882 	perf_test(test_extent_overwrite_all);
883 	perf_test(test_extent_create_overlapping);
885 	perf_test(test_snapshots);
888 		pr_err("unknown test %s", testname);
892 	//pr_info("running test %s:", testname);
	/* the calling thread participates as one of the workers */
895 		btree_perf_test_thread(&j);
897 		for (i = 0; i < nr_threads; i++)
898 			kthread_run(btree_perf_test_thread, &j,
899 				    "bcachefs perf test[%u]", i);
	/* interruptible wait retried in a loop so signals don't abort the test */
901 	while (wait_for_completion_interruptible(&j.done_completion))
904 	time = j.finish - j.start;
906 	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
907 	prt_human_readable_u64(&nr_buf, nr);
908 	prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
909 	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
910 		name_buf, nr_buf.buf, nr_threads,
911 		div_u64(time, NSEC_PER_SEC),
912 		div_u64(time * nr_threads, nr),
914 	printbuf_exit(&per_sec_buf);
915 	printbuf_exit(&nr_buf);
919 #endif /* CONFIG_BCACHEFS_TESTS */