1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
9 #include "linux/kthread.h"
10 #include "linux/random.h"
12 static void delete_test_keys(struct bch_fs *c)
/*
 * Reset helper: wipe every key at inode 0 in both btrees the tests use
 * (extents and xattrs), so each test starts from an empty keyspace.
 * (Return-value checks between the two calls are elided from this view.)
 */
16 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
17 				      POS(0, 0), POS(0, U64_MAX),
21 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
22 				      POS(0, 0), POS(0, U64_MAX),
29 static int test_delete(struct bch_fs *c, u64 nr)
/*
 * Unit test: insert a single cookie key in the xattrs btree, then issue two
 * deletes at the same position through one iterator.  The second delete
 * exercises deleting at a position whose key was already removed.
 * (Error-check/goto lines between the steps are elided from this view.)
 */
31 	struct btree_trans trans;
32 	struct btree_iter iter;
33 	struct bkey_i_cookie k;
36 	bkey_cookie_init(&k.k_i);
	/* key lives at POS(0, 0) in the newest snapshot */
37 	k.k.p.snapshot = U32_MAX;
39 	bch2_trans_init(&trans, c, 0, 0);
40 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
	/* insert the key; __bch2_trans_do retries on transaction restart */
43 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
44 		bch2_btree_iter_traverse(&iter) ?:
45 		bch2_trans_update(&trans, &iter, &k.k_i, 0));
47 		bch_err(c, "update error in test_delete: %i", ret);
51 	pr_info("deleting once");
52 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
53 		bch2_btree_iter_traverse(&iter) ?:
54 		bch2_btree_delete_at(&trans, &iter, 0));
56 		bch_err(c, "delete error (first) in test_delete: %i", ret);
60 	pr_info("deleting twice");
61 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
62 		bch2_btree_iter_traverse(&iter) ?:
63 		bch2_btree_delete_at(&trans, &iter, 0));
65 		bch_err(c, "delete error (second) in test_delete: %i", ret);
69 	bch2_trans_iter_exit(&trans, &iter);
70 	bch2_trans_exit(&trans);
74 static int test_delete_written(struct bch_fs *c, u64 nr)
/*
 * Like test_delete(), but flushes all journal pins after the insert so the
 * key has been written out to the btree (not just sitting in the journal)
 * before it is deleted.  The trans is unlocked first since the journal
 * flush can block.  (Error-check lines elided from this view.)
 */
76 	struct btree_trans trans;
77 	struct btree_iter iter;
78 	struct bkey_i_cookie k;
81 	bkey_cookie_init(&k.k_i);
82 	k.k.p.snapshot = U32_MAX;
84 	bch2_trans_init(&trans, c, 0, 0);
86 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p,
89 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
90 		bch2_btree_iter_traverse(&iter) ?:
91 		bch2_trans_update(&trans, &iter, &k.k_i, 0));
93 		bch_err(c, "update error in test_delete_written: %i", ret);
	/* drop btree locks before the (potentially blocking) journal flush */
97 	bch2_trans_unlock(&trans);
98 	bch2_journal_flush_all_pins(&c->journal);
100 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
101 		bch2_btree_iter_traverse(&iter) ?:
102 		bch2_btree_delete_at(&trans, &iter, 0));
104 		bch_err(c, "delete error in test_delete_written: %i", ret);
108 	bch2_trans_iter_exit(&trans, &iter);
109 	bch2_trans_exit(&trans);
113 static int test_iterate(struct bch_fs *c, u64 nr)
/*
 * Insert @nr cookie keys at consecutive offsets in the xattrs btree, then
 * verify that forward iteration visits offsets 0..nr-1 in order and that
 * backward iteration visits them in reverse.  (Offset assignment and the
 * i == nr sanity checks are on lines elided from this view.)
 */
115 	struct btree_trans trans;
116 	struct btree_iter iter = { NULL };
121 	bch2_trans_init(&trans, c, 0, 0);
125 	pr_info("inserting test keys");
127 	for (i = 0; i < nr; i++) {
128 		struct bkey_i_cookie k;
130 		bkey_cookie_init(&k.k_i);
132 		k.k.p.snapshot = U32_MAX;
134 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
137 			bch_err(c, "insert error in test_iterate: %i", ret);
142 	pr_info("iterating forwards");
146 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
147 			   POS_MIN, 0, k, ret) {
		/* keys must come back in strictly increasing offset order */
151 		BUG_ON(k.k->p.offset != i++);
156 	pr_info("iterating backwards");
158 	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
159 		BUG_ON(k.k->p.offset != --i);
163 	bch2_trans_iter_exit(&trans, &iter);
164 	bch2_trans_exit(&trans);
168 static int test_iterate_extents(struct bch_fs *c, u64 nr)
/*
 * Extents variant of test_iterate(): insert back-to-back 8-sector extents
 * covering [0, nr), then verify forward and backward iteration returns them
 * contiguously — each extent must start where the previous one ended.
 */
170 	struct btree_trans trans;
171 	struct btree_iter iter = { NULL };
176 	bch2_trans_init(&trans, c, 0, 0);
180 	pr_info("inserting test extents");
182 	for (i = 0; i < nr; i += 8) {
183 		struct bkey_i_cookie k;
185 		bkey_cookie_init(&k.k_i);
		/* extent [i, i + 8): p.offset is the end position */
186 		k.k.p.offset = i + 8;
187 		k.k.p.snapshot = U32_MAX;
190 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
193 			bch_err(c, "insert error in test_iterate_extents: %i", ret);
198 	pr_info("iterating forwards");
202 	for_each_btree_key(&trans, iter, BTREE_ID_extents,
203 			   POS_MIN, 0, k, ret) {
204 		BUG_ON(bkey_start_offset(k.k) != i);
210 	pr_info("iterating backwards");
212 	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
		/* walking backwards: each extent must end where the previous began */
213 		BUG_ON(k.k->p.offset != i);
214 		i = bkey_start_offset(k.k);
219 	bch2_trans_iter_exit(&trans, &iter);
220 	bch2_trans_exit(&trans);
224 static int test_iterate_slots(struct bch_fs *c, u64 nr)
/*
 * Insert cookie keys only at even offsets, then check two iteration modes:
 * normal iteration must return only the existing (even) keys, while
 * BTREE_ITER_SLOTS iteration must return every slot, with deleted/empty
 * keys at the odd offsets.  (Some loop-bookkeeping lines elided here.)
 */
226 	struct btree_trans trans;
227 	struct btree_iter iter = { NULL };
232 	bch2_trans_init(&trans, c, 0, 0);
236 	pr_info("inserting test keys");
238 	for (i = 0; i < nr; i++) {
239 		struct bkey_i_cookie k;
241 		bkey_cookie_init(&k.k_i);
		/* keys at offsets 0, 2, 4, ... — odd slots stay empty */
242 		k.k.p.offset = i * 2;
243 		k.k.p.snapshot = U32_MAX;
245 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
248 			bch_err(c, "insert error in test_iterate_slots: %i", ret);
253 	pr_info("iterating forwards");
257 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
262 		BUG_ON(k.k->p.offset != i);
265 	bch2_trans_iter_exit(&trans, &iter);
269 	pr_info("iterating forwards by slots");
273 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
274 			   BTREE_ITER_SLOTS, k, ret) {
275 		BUG_ON(k.k->p.offset != i);
		/* odd slots (i & 1) must read back as deleted/empty */
276 		BUG_ON(bkey_deleted(k.k) != (i & 1));
282 	bch2_trans_iter_exit(&trans, &iter);
284 	bch2_trans_exit(&trans);
288 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
/*
 * Extents version of test_iterate_slots(): insert an 8-sector extent in
 * every 16-sector window (covering [i + 8, i + 16)), then verify normal
 * iteration returns only the real extents, while BTREE_ITER_SLOTS also
 * returns the 8-sector holes between them as deleted keys.
 */
290 	struct btree_trans trans;
291 	struct btree_iter iter = { NULL };
296 	bch2_trans_init(&trans, c, 0, 0);
300 	pr_info("inserting test keys");
302 	for (i = 0; i < nr; i += 16) {
303 		struct bkey_i_cookie k;
305 		bkey_cookie_init(&k.k_i);
		/* extent [i + 8, i + 16): size is set on a line elided here */
306 		k.k.p.offset = i + 16;
307 		k.k.p.snapshot = U32_MAX;
310 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
313 			bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
318 	pr_info("iterating forwards");
322 	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
324 		BUG_ON(bkey_start_offset(k.k) != i + 8);
325 		BUG_ON(k.k->size != 8);
328 	bch2_trans_iter_exit(&trans, &iter);
332 	pr_info("iterating forwards by slots");
336 	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
337 			   BTREE_ITER_SLOTS, k, ret) {
		/* first half of each 16-sector window is a hole */
338 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
340 		BUG_ON(bkey_start_offset(k.k) != i);
341 		BUG_ON(k.k->size != 8);
347 	bch2_trans_iter_exit(&trans, &iter);
349 	bch2_trans_exit(&trans);
354 * XXX: we really want to make sure we've got a btree with depth > 0 for these
357 static int test_peek_end(struct bch_fs *c, u64 nr)
/*
 * Peek twice on an empty xattrs btree; both peeks must return no key.
 * The second peek checks that hitting the end of the btree leaves the
 * iterator in a state where peeking again is still safe.
 * (The BUG_ON(k.k) checks after each peek are elided from this view.)
 */
359 	struct btree_trans trans;
360 	struct btree_iter iter;
363 	bch2_trans_init(&trans, c, 0, 0);
364 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);
366 	k = bch2_btree_iter_peek(&iter);
369 	k = bch2_btree_iter_peek(&iter);
372 	bch2_trans_iter_exit(&trans, &iter);
373 	bch2_trans_exit(&trans);
377 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
/*
 * Same as test_peek_end(), but against the extents btree — extents use a
 * different iterator path, so the end-of-btree behavior is tested separately.
 */
379 	struct btree_trans trans;
380 	struct btree_iter iter;
383 	bch2_trans_init(&trans, c, 0, 0);
384 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN, 0);
386 	k = bch2_btree_iter_peek(&iter);
389 	k = bch2_btree_iter_peek(&iter);
392 	bch2_trans_iter_exit(&trans, &iter);
393 	bch2_trans_exit(&trans);
397 /* extent unit tests */
401 static int insert_test_extent(struct bch_fs *c,
/*
 * Insert a cookie extent covering [start, end) into the extents btree,
 * stamped with a monotonically increasing version (test_version is a
 * file-scope counter not visible in this view).  Returns the insert's
 * error code, logging on failure.
 */
404 	struct bkey_i_cookie k;
407 	//pr_info("inserting %llu-%llu v %llu", start, end, test_version);
409 	bkey_cookie_init(&k.k_i);
410 	k.k_i.k.p.offset = end;
411 	k.k_i.k.p.snapshot = U32_MAX;
412 	k.k_i.k.size = end - start;
	/* bump the version so later overwrites win over earlier extents */
413 	k.k_i.k.version.lo = test_version++;
415 	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
418 		bch_err(c, "insert error in insert_test_extent: %i", ret);
422 static int __test_extent_overwrite(struct bch_fs *c,
423 				   u64 e1_start, u64 e1_end,
424 				   u64 e2_start, u64 e2_end)
/*
 * Overwrite scenario helper: insert extent e1, then overlapping extent e2
 * on top of it.  Callers pick the ranges to exercise front/back/middle/full
 * overwrites.  (Verification after the inserts is elided from this view.)
 */
428 	ret   = insert_test_extent(c, e1_start, e1_end) ?:
429 		insert_test_extent(c, e2_start, e2_end);
435 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
/* Overwrite the front of an existing extent (exact and offset starts). */
437 	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
438 		__test_extent_overwrite(c, 8, 64, 0, 32);
441 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
/* Overwrite the back of an existing extent, both within and past its end. */
443 	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
444 		__test_extent_overwrite(c, 0, 64, 32, 72);
447 static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
/* Overwrite the middle of an extent, splitting it into three pieces. */
449 	return __test_extent_overwrite(c, 0, 64, 32, 40);
452 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
/* Completely cover an existing extent, with various start/end alignments. */
454 	return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
455 		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
456 		__test_extent_overwrite(c, 32, 64, 32,  64) ?:
457 		__test_extent_overwrite(c, 32, 64, 32, 128);
462 static u64 test_rand(void)
/* Return a pseudorandom u64 (not cryptographically secure — test use only). */
468 	prandom_bytes(&v, sizeof(v));
473 static int rand_insert(struct bch_fs *c, u64 nr)
/*
 * Perf test: insert @nr cookie keys at random offsets into the xattrs
 * btree, one key per transaction commit.  Duplicate random offsets simply
 * overwrite.  (Error break-out lines elided from this view.)
 */
475 	struct btree_trans trans;
476 	struct bkey_i_cookie k;
480 	bch2_trans_init(&trans, c, 0, 0);
482 	for (i = 0; i < nr; i++) {
483 		bkey_cookie_init(&k.k_i);
484 		k.k.p.offset = test_rand();
485 		k.k.p.snapshot = U32_MAX;
487 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
488 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
490 			bch_err(c, "error in rand_insert: %i", ret);
495 	bch2_trans_exit(&trans);
499 static int rand_insert_multi(struct bch_fs *c, u64 nr)
/*
 * Like rand_insert(), but batches 8 random-offset inserts into a single
 * transaction commit — measures multi-update commit overhead.  The chain
 * of ?: operators short-circuits on the first failing insert.
 */
501 	struct btree_trans trans;
502 	struct bkey_i_cookie k[8];
507 	bch2_trans_init(&trans, c, 0, 0);
509 	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
510 		for (j = 0; j < ARRAY_SIZE(k); j++) {
511 			bkey_cookie_init(&k[j].k_i);
512 			k[j].k.p.offset = test_rand();
513 			k[j].k.p.snapshot = U32_MAX;
516 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
517 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
518 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
519 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
520 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
521 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
522 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
523 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
524 			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
526 			bch_err(c, "error in rand_insert_multi: %i", ret);
531 	bch2_trans_exit(&trans);
535 static int rand_lookup(struct bch_fs *c, u64 nr)
/*
 * Perf test: @nr point lookups at random offsets, reusing one iterator by
 * repositioning it each time.  (Error extraction from the peeked key and
 * the break-out path are on lines elided from this view.)
 */
537 	struct btree_trans trans;
538 	struct btree_iter iter;
543 	bch2_trans_init(&trans, c, 0, 0);
544 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);
546 	for (i = 0; i < nr; i++) {
547 		bch2_btree_iter_set_pos(&iter, POS(0, test_rand()));
549 		k = bch2_btree_iter_peek(&iter);
552 			bch_err(c, "error in rand_lookup: %i", ret);
557 	bch2_trans_iter_exit(&trans, &iter);
558 	bch2_trans_exit(&trans);
562 static int rand_mixed_trans(struct btree_trans *trans,
563 			    struct btree_iter *iter,
564 			    struct bkey_i_cookie *cookie,
/*
 * One iteration of the rand_mixed workload, run inside a transaction
 * restart loop: look up the key at @pos, and on every 4th iteration
 * (when a key exists there) queue an overwrite at the iterator position.
 * -EINTR means transaction restart and is deliberately not logged.
 */
570 	bch2_btree_iter_set_pos(iter, POS(0, pos));
572 	k = bch2_btree_iter_peek(iter);
574 	if (ret && ret != -EINTR)
575 		bch_err(trans->c, "lookup error in rand_mixed: %i", ret);
	/* mix in a write on every 4th call, only if the lookup found a key */
579 	if (!(i & 3) && k.k) {
580 		bkey_cookie_init(&cookie->k_i);
581 		cookie->k.p = iter->pos;
582 		bch2_trans_update(trans, iter, &cookie->k_i, 0);
588 static int rand_mixed(struct bch_fs *c, u64 nr)
/*
 * Perf test: @nr mixed random lookups/updates — the per-iteration work
 * (and the 1-in-4 write ratio) lives in rand_mixed_trans().  The random
 * position is drawn on a line elided from this view.
 */
590 	struct btree_trans trans;
591 	struct btree_iter iter;
592 	struct bkey_i_cookie cookie;
596 	bch2_trans_init(&trans, c, 0, 0);
597 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);
599 	for (i = 0; i < nr; i++) {
601 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
602 			rand_mixed_trans(&trans, &iter, &cookie, i, rand));
604 			bch_err(c, "update error in rand_mixed: %i", ret);
609 	bch2_trans_iter_exit(&trans, &iter);
610 	bch2_trans_exit(&trans);
614 static int __do_delete(struct btree_trans *trans, struct bpos pos)
/*
 * Delete the first key at or after @pos by updating it to an empty
 * (whiteout) key.  Runs inside the caller's transaction restart loop.
 * (Error checks and setting delete.k.p from the peeked key are on lines
 * elided from this view.)
 */
616 	struct btree_iter iter;
617 	struct bkey_i delete;
621 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
623 	k = bch2_btree_iter_peek(&iter);
	/* an empty key (KEY_TYPE_deleted) acts as the deletion record */
631 	bkey_init(&delete.k);
634 	ret = bch2_trans_update(trans, &iter, &delete, 0);
636 	bch2_trans_iter_exit(trans, &iter);
640 static int rand_delete(struct bch_fs *c, u64 nr)
/*
 * Perf test: @nr deletes at random positions, each committed via
 * __do_delete() inside its own transaction restart loop.
 */
642 	struct btree_trans trans;
646 	bch2_trans_init(&trans, c, 0, 0);
648 	for (i = 0; i < nr; i++) {
649 		struct bpos pos = POS(0, test_rand());
651 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
652 			__do_delete(&trans, pos));
654 			bch_err(c, "error in rand_delete: %i", ret);
659 	bch2_trans_exit(&trans);
663 static int seq_insert(struct bch_fs *c, u64 nr)
/*
 * Perf test: walk every slot (BTREE_ITER_SLOTS) from POS_MIN and insert a
 * cookie key at each position, stopping after @nr inserts (the stop
 * condition is on a line elided from this view).
 */
665 	struct btree_trans trans;
666 	struct btree_iter iter;
668 	struct bkey_i_cookie insert;
672 	bkey_cookie_init(&insert.k_i);
674 	bch2_trans_init(&trans, c, 0, 0);
676 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
677 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		/* reuse one bkey, repositioned to the current slot */
678 		insert.k.p = iter.pos;
680 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
681 			bch2_btree_iter_traverse(&iter) ?:
682 			bch2_trans_update(&trans, &iter, &insert.k_i, 0));
684 			bch_err(c, "error in seq_insert: %i", ret);
691 	bch2_trans_iter_exit(&trans, &iter);
693 	bch2_trans_exit(&trans);
697 static int seq_lookup(struct bch_fs *c, u64 nr)
/*
 * Perf test: sequentially iterate over every key in the xattrs btree —
 * measures raw forward-iteration throughput (loop body is empty).
 */
699 	struct btree_trans trans;
700 	struct btree_iter iter;
704 	bch2_trans_init(&trans, c, 0, 0);
706 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
708 	bch2_trans_iter_exit(&trans, &iter);
710 	bch2_trans_exit(&trans);
714 static int seq_overwrite(struct bch_fs *c, u64 nr)
/*
 * Perf test: iterate over every existing key with an intent lock and
 * rewrite it in place — each key is reassembled into a cookie and
 * committed via its own transaction restart loop.
 */
716 	struct btree_trans trans;
717 	struct btree_iter iter;
721 	bch2_trans_init(&trans, c, 0, 0);
	/* BTREE_ITER_INTENT: take intent locks since we'll update each key */
723 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
724 			   BTREE_ITER_INTENT, k, ret) {
725 		struct bkey_i_cookie u;
727 		bkey_reassemble(&u.k_i, k);
729 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
730 			bch2_btree_iter_traverse(&iter) ?:
731 			bch2_trans_update(&trans, &iter, &u.k_i, 0));
733 			bch_err(c, "error in seq_overwrite: %i", ret);
737 	bch2_trans_iter_exit(&trans, &iter);
739 	bch2_trans_exit(&trans);
743 static int seq_delete(struct bch_fs *c, u64 nr)
/* Perf test: delete the whole inode-0 range of the xattrs btree in one call. */
747 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
748 				      POS(0, 0), POS(0, U64_MAX),
751 		bch_err(c, "error in seq_delete: %i", ret);
755 typedef int (*perf_test_fn)(struct bch_fs *, u64);
764 wait_queue_head_t ready_wait;
767 struct completion done_completion;
774 static int btree_perf_test_thread(void *data)
/*
 * Worker body for bch2_btree_perf_test().  All workers rendezvous on
 * j->ready: the last one to arrive records the start timestamp and wakes
 * the rest.  Each worker then runs its 1/nr_threads share of the
 * iterations; the last to finish records the finish timestamp and signals
 * the main thread via done_completion.
 */
776 	struct test_job *j = data;
779 	if (atomic_dec_and_test(&j->ready)) {
780 		wake_up(&j->ready_wait);
		/* last thread to become ready starts the clock */
781 		j->start = sched_clock();
783 		wait_event(j->ready_wait, !atomic_read(&j->ready));
	/* split the total iteration count evenly across threads */
786 	ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
790 	if (atomic_dec_and_test(&j->done)) {
791 		j->finish = sched_clock();
792 		complete(&j->done_completion);
798 int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
799 			 u64 nr, unsigned nr_threads)
/*
 * Entry point (from sysfs): resolve @testname to one of the test functions
 * above, run it with @nr total iterations split across @nr_threads
 * workers, then print elapsed time and per-iteration/per-second rates.
 * Note the unit tests are also dispatched through this path even though
 * their timings aren't meaningful as benchmarks.
 */
801 	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
802 	char name_buf[20], nr_buf[20], per_sec_buf[20];
806 	atomic_set(&j.ready, nr_threads);
807 	init_waitqueue_head(&j.ready_wait);
809 	atomic_set(&j.done, nr_threads);
810 	init_completion(&j.done_completion);
/* map a test name string to its function; falls through to the unknown-test
 * error below if nothing matched (j.fn left NULL) */
812 #define perf_test(_test)				\
813 	if (!strcmp(testname, #_test)) j.fn = _test
815 	perf_test(rand_insert);
816 	perf_test(rand_insert_multi);
817 	perf_test(rand_lookup);
818 	perf_test(rand_mixed);
819 	perf_test(rand_delete);
821 	perf_test(seq_insert);
822 	perf_test(seq_lookup);
823 	perf_test(seq_overwrite);
824 	perf_test(seq_delete);
826 	/* a unit test, not a perf test: */
827 	perf_test(test_delete);
828 	perf_test(test_delete_written);
829 	perf_test(test_iterate);
830 	perf_test(test_iterate_extents);
831 	perf_test(test_iterate_slots);
832 	perf_test(test_iterate_slots_extents);
833 	perf_test(test_peek_end);
834 	perf_test(test_peek_end_extents);
836 	perf_test(test_extent_overwrite_front);
837 	perf_test(test_extent_overwrite_back);
838 	perf_test(test_extent_overwrite_middle);
839 	perf_test(test_extent_overwrite_all);
842 		pr_err("unknown test %s", testname);
846 	//pr_info("running test %s:", testname);
	/* NOTE(review): the condition above this call is elided — presumably the
	 * single-threaded case runs inline here while the loop below spawns
	 * kthreads for the multi-threaded case; confirm against full source */
849 		btree_perf_test_thread(&j);
851 		for (i = 0; i < nr_threads; i++)
852 			kthread_run(btree_perf_test_thread, &j,
853 				    "bcachefs perf test[%u]", i);
	/* retry if interrupted by a signal; workers finish asynchronously */
855 	while (wait_for_completion_interruptible(&j.done_completion))
	/* elapsed wall-clock nanoseconds, as recorded by the worker threads */
858 	time = j.finish - j.start;
860 	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
861 	bch2_hprint(&PBUF(nr_buf), nr);
862 	bch2_hprint(&PBUF(per_sec_buf), div64_u64(nr * NSEC_PER_SEC, time));
863 	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
864 		name_buf, nr_buf, nr_threads,
865 		div_u64(time, NSEC_PER_SEC),
866 		div_u64(time * nr_threads, nr),