1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
9 #include "linux/kthread.h"
10 #include "linux/random.h"
/*
 * delete_test_keys() - wipe all keys previous test runs may have left behind.
 * Clears offsets 0..U64_MAX in both the extents and xattrs btrees via
 * bch2_btree_delete_range().
 * NOTE(review): this view of the file is elided (interior lines missing);
 * the handling of @ret is not visible here.
 */
12 static void delete_test_keys(struct bch_fs *c)
16 ret = bch2_btree_delete_range(c, BTREE_ID_extents,
17 POS(0, 0), POS(0, U64_MAX),
21 ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
22 POS(0, 0), POS(0, U64_MAX),
/*
 * test_delete() - unit test: insert one cookie key, then delete at the same
 * iterator position twice (the second delete exercises deleting an
 * already-deleted slot).
 * NOTE(review): elided view — @ret declaration, error paths and the final
 * return are not visible here.
 */
29 static int test_delete(struct bch_fs *c, u64 nr)
31 struct btree_trans trans;
32 struct btree_iter *iter;
33 struct bkey_i_cookie k;
/* key at snapshot U32_MAX; offset presumably left at 0 by bkey_cookie_init —
 * the initializing line is not visible in this view */
36 bkey_cookie_init(&k.k_i);
37 k.k.p.snapshot = U32_MAX;
39 bch2_trans_init(&trans, c, 0, 0);
41 iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
/* insert the key inside a transaction restart loop */
44 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
45 bch2_btree_iter_traverse(iter) ?:
46 bch2_trans_update(&trans, iter, &k.k_i, 0));
48 bch_err(c, "update error in test_delete: %i", ret);
/* first delete: key exists */
52 pr_info("deleting once");
53 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
54 bch2_btree_iter_traverse(iter) ?:
55 bch2_btree_delete_at(&trans, iter, 0));
57 bch_err(c, "delete error (first) in test_delete: %i", ret);
/* second delete at the same position: key is already gone */
61 pr_info("deleting twice");
62 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
63 bch2_btree_iter_traverse(iter) ?:
64 bch2_btree_delete_at(&trans, iter, 0));
66 bch_err(c, "delete error (second) in test_delete: %i", ret);
70 bch2_trans_iter_put(&trans, iter);
71 bch2_trans_exit(&trans);
/*
 * test_delete_written() - unit test: insert a key, force it to be written out
 * (journal pins flushed), then delete it — exercises deleting a key that has
 * already hit the btree node rather than only the journal.
 * NOTE(review): elided view — @ret declaration, error paths and the final
 * return are not visible here.
 */
75 static int test_delete_written(struct bch_fs *c, u64 nr)
77 struct btree_trans trans;
78 struct btree_iter *iter;
79 struct bkey_i_cookie k;
82 bkey_cookie_init(&k.k_i);
83 k.k.p.snapshot = U32_MAX;
85 bch2_trans_init(&trans, c, 0, 0);
87 iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
90 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
91 bch2_btree_iter_traverse(iter) ?:
92 bch2_trans_update(&trans, iter, &k.k_i, 0));
94 bch_err(c, "update error in test_delete_written: %i", ret);
/* unlock before flushing so the journal flush can take btree locks */
98 bch2_trans_unlock(&trans);
99 bch2_journal_flush_all_pins(&c->journal);
101 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
102 bch2_btree_iter_traverse(iter) ?:
103 bch2_btree_delete_at(&trans, iter, 0));
105 bch_err(c, "delete error in test_delete_written: %i", ret);
109 bch2_trans_iter_put(&trans, iter);
110 bch2_trans_exit(&trans);
/*
 * test_iterate() - unit test: insert @nr sequential cookie keys into the
 * xattrs btree, then verify forward iteration yields offsets 0..nr-1 in order
 * and backward iteration yields them in reverse.
 * NOTE(review): elided view — the per-key offset assignment, loop variable
 * declarations and error/return paths are not visible here.
 */
114 static int test_iterate(struct bch_fs *c, u64 nr)
116 struct btree_trans trans;
117 struct btree_iter *iter = NULL;
122 bch2_trans_init(&trans, c, 0, 0);
126 pr_info("inserting test keys");
128 for (i = 0; i < nr; i++) {
129 struct bkey_i_cookie k;
131 bkey_cookie_init(&k.k_i);
133 k.k.p.snapshot = U32_MAX;
135 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
138 bch_err(c, "insert error in test_iterate: %i", ret);
/* forward pass: offsets must come back dense and in order */
143 pr_info("iterating forwards");
147 for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
148 POS_MIN, 0, k, ret) {
152 BUG_ON(k.k->p.offset != i++);
/* backward pass: walk back from the end, decrementing the expected offset */
157 pr_info("iterating backwards");
159 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k))
160 BUG_ON(k.k->p.offset != --i);
164 bch2_trans_iter_put(&trans, iter);
165 bch2_trans_exit(&trans);
/*
 * test_iterate_extents() - unit test: insert contiguous 8-sector extents
 * covering 0..nr, then verify forward iteration sees each extent starting
 * where the previous one ended, and backward iteration the reverse.
 * NOTE(review): elided view — extent size assignment, loop variable
 * declarations and error/return paths are not visible here.
 */
169 static int test_iterate_extents(struct bch_fs *c, u64 nr)
171 struct btree_trans trans;
172 struct btree_iter *iter = NULL;
177 bch2_trans_init(&trans, c, 0, 0);
181 pr_info("inserting test extents");
183 for (i = 0; i < nr; i += 8) {
184 struct bkey_i_cookie k;
186 bkey_cookie_init(&k.k_i);
/* extent keys are positioned at their END offset */
187 k.k.p.offset = i + 8;
188 k.k.p.snapshot = U32_MAX;
191 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
194 bch_err(c, "insert error in test_iterate_extents: %i", ret);
/* forward: each extent must start exactly where the previous ended */
199 pr_info("iterating forwards");
203 for_each_btree_key(&trans, iter, BTREE_ID_extents,
204 POS_MIN, 0, k, ret) {
205 BUG_ON(bkey_start_offset(k.k) != i);
/* backward: each extent must end where the next one started */
211 pr_info("iterating backwards");
213 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k)) {
214 BUG_ON(k.k->p.offset != i);
215 i = bkey_start_offset(k.k);
220 bch2_trans_iter_put(&trans, iter);
221 bch2_trans_exit(&trans);
/*
 * test_iterate_slots() - unit test: insert keys at every EVEN offset, then
 * verify that a normal iteration skips the holes while a BTREE_ITER_SLOTS
 * iteration returns every slot, with odd slots showing up as deleted/hole
 * keys.
 * NOTE(review): elided view — loop variable declarations, the iterator flag
 * on the first forward pass and error/return paths are not visible here.
 */
225 static int test_iterate_slots(struct bch_fs *c, u64 nr)
227 struct btree_trans trans;
228 struct btree_iter *iter;
233 bch2_trans_init(&trans, c, 0, 0);
237 pr_info("inserting test keys");
239 for (i = 0; i < nr; i++) {
240 struct bkey_i_cookie k;
242 bkey_cookie_init(&k.k_i);
/* keys only at even offsets, leaving gaps at odd offsets */
243 k.k.p.offset = i * 2;
244 k.k.p.snapshot = U32_MAX;
246 ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
249 bch_err(c, "insert error in test_iterate_slots: %i", ret);
/* plain iteration: only the inserted (even) offsets come back */
254 pr_info("iterating forwards");
258 for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
263 BUG_ON(k.k->p.offset != i);
266 bch2_trans_iter_put(&trans, iter);
/* slots iteration: every offset comes back; odd ones must read as deleted */
270 pr_info("iterating forwards by slots");
274 for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
275 BTREE_ITER_SLOTS, k, ret) {
276 BUG_ON(k.k->p.offset != i);
277 BUG_ON(bkey_deleted(k.k) != (i & 1));
283 bch2_trans_iter_put(&trans, iter);
285 bch2_trans_exit(&trans);
/*
 * test_iterate_slots_extents() - unit test: insert 8-sector extents with
 * 8-sector holes between them (one extent per 16 sectors), then verify plain
 * iteration returns only the live extents while BTREE_ITER_SLOTS also
 * synthesizes 8-sector hole extents for the gaps.
 * NOTE(review): elided view — extent size assignment, loop variables and
 * error/return paths are not visible here.
 */
289 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
291 struct btree_trans trans;
292 struct btree_iter *iter;
297 bch2_trans_init(&trans, c, 0, 0);
301 pr_info("inserting test keys");
303 for (i = 0; i < nr; i += 16) {
304 struct bkey_i_cookie k;
306 bkey_cookie_init(&k.k_i);
/* extent ends at i+16; start (i+8, per the check below) set on an elided line */
307 k.k.p.offset = i + 16;
308 k.k.p.snapshot = U32_MAX;
311 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
314 bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
/* plain iteration: only live extents, each starting at i+8, size 8 */
319 pr_info("iterating forwards");
323 for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
325 BUG_ON(bkey_start_offset(k.k) != i + 8);
326 BUG_ON(k.k->size != 8);
329 bch2_trans_iter_put(&trans, iter);
/* slots iteration: holes are returned too, flagged deleted */
333 pr_info("iterating forwards by slots");
337 for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
338 BTREE_ITER_SLOTS, k, ret) {
339 BUG_ON(bkey_deleted(k.k) != !(i % 16));
341 BUG_ON(bkey_start_offset(k.k) != i);
342 BUG_ON(k.k->size != 8);
348 bch2_trans_iter_put(&trans, iter);
350 bch2_trans_exit(&trans);
355 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * test_peek_end() - unit test: peek past the end of an (empty) xattrs btree
 * twice and expect no key both times — the second peek checks the iterator
 * stays sane after hitting the end.
 * NOTE(review): elided view — the BUG_ON(k.k) checks after each peek and the
 * return are not visible here.
 */
358 static int test_peek_end(struct bch_fs *c, u64 nr)
360 struct btree_trans trans;
361 struct btree_iter *iter;
364 bch2_trans_init(&trans, c, 0, 0);
366 iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
368 k = bch2_btree_iter_peek(iter);
371 k = bch2_btree_iter_peek(iter);
374 bch2_trans_iter_put(&trans, iter);
376 bch2_trans_exit(&trans);
/*
 * test_peek_end_extents() - same as test_peek_end() but on the extents
 * btree: double peek past the end must return no key both times.
 * NOTE(review): elided view — the checks after each peek and the return are
 * not visible here.
 */
380 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
382 struct btree_trans trans;
383 struct btree_iter *iter;
386 bch2_trans_init(&trans, c, 0, 0);
388 iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN, 0);
390 k = bch2_btree_iter_peek(iter);
393 k = bch2_btree_iter_peek(iter);
396 bch2_trans_iter_put(&trans, iter);
398 bch2_trans_exit(&trans);
402 /* extent unit tests */
/*
 * insert_test_extent() - helper for the extent-overwrite tests: insert a
 * cookie extent covering [start, end) into the extents btree, stamping it
 * with a monotonically increasing version so overwrite resolution is
 * deterministic.
 * NOTE(review): elided view — the start/end parameters and the return are on
 * lines not visible here; test_version is presumably a file-scope counter.
 */
406 static int insert_test_extent(struct bch_fs *c,
409 struct bkey_i_cookie k;
412 //pr_info("inserting %llu-%llu v %llu", start, end, test_version);
414 bkey_cookie_init(&k.k_i);
/* extent position is its end; size gives the start implicitly */
415 k.k_i.k.p.offset = end;
416 k.k_i.k.p.snapshot = U32_MAX;
417 k.k_i.k.size = end - start;
418 k.k_i.k.version.lo = test_version++;
420 ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
423 bch_err(c, "insert error in insert_test_extent: %i", ret);
/*
 * __test_extent_overwrite() - insert extent e1 then overlapping extent e2,
 * exercising the extent-overwrite path; the chained ?: stops at the first
 * failing insert.
 * NOTE(review): elided view — the post-insert verification (if any) and the
 * return are not visible here.
 */
427 static int __test_extent_overwrite(struct bch_fs *c,
428 u64 e1_start, u64 e1_end,
429 u64 e2_start, u64 e2_end)
433 ret = insert_test_extent(c, e1_start, e1_end) ?:
434 insert_test_extent(c, e2_start, e2_end);
/* Overwrite the front of an existing extent (aligned and unaligned starts). */
440 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
442 return __test_extent_overwrite(c, 0, 64, 0, 32) ?:
443 __test_extent_overwrite(c, 8, 64, 0, 32);
/* Overwrite the back of an existing extent (ending at and past its end). */
446 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
448 return __test_extent_overwrite(c, 0, 64, 32, 64) ?:
449 __test_extent_overwrite(c, 0, 64, 32, 72);
/* Overwrite the middle of an existing extent, splitting it in two. */
452 static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
454 return __test_extent_overwrite(c, 0, 64, 32, 40);
/* Completely cover an existing extent, with every alignment combination of
 * the overwriting extent's start/end relative to the original's. */
457 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
459 return __test_extent_overwrite(c, 32, 64, 0, 64) ?:
460 __test_extent_overwrite(c, 32, 64, 0, 128) ?:
461 __test_extent_overwrite(c, 32, 64, 32, 64) ?:
462 __test_extent_overwrite(c, 32, 64, 32, 128);
/*
 * test_rand() - return a pseudo-random u64 for the rand_* tests.
 * NOTE(review): elided view — v's declaration and the return/transform
 * applied to it are not visible here.
 */
467 static u64 test_rand(void)
473 prandom_bytes(&v, sizeof(v));
/*
 * rand_insert() - perf test: insert @nr cookie keys at random offsets into
 * the xattrs btree, one per transaction.
 * NOTE(review): elided view — @ret/@i declarations, the loop's error break
 * and the return are not visible here.
 */
478 static int rand_insert(struct bch_fs *c, u64 nr)
480 struct btree_trans trans;
481 struct bkey_i_cookie k;
485 bch2_trans_init(&trans, c, 0, 0);
487 for (i = 0; i < nr; i++) {
488 bkey_cookie_init(&k.k_i);
489 k.k.p.offset = test_rand();
490 k.k.p.snapshot = U32_MAX;
492 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
493 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
495 bch_err(c, "error in rand_insert: %i", ret);
500 bch2_trans_exit(&trans);
/*
 * rand_insert_multi() - perf test: like rand_insert(), but batches 8 random
 * inserts into a single transaction commit; the ?: chain aborts the batch at
 * the first failing insert.
 * NOTE(review): elided view — @ret/@i/@j declarations and the return are not
 * visible here. The 8 unrolled calls must stay in sync with ARRAY_SIZE(k).
 */
504 static int rand_insert_multi(struct bch_fs *c, u64 nr)
506 struct btree_trans trans;
507 struct bkey_i_cookie k[8];
512 bch2_trans_init(&trans, c, 0, 0);
514 for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
515 for (j = 0; j < ARRAY_SIZE(k); j++) {
516 bkey_cookie_init(&k[j].k_i);
517 k[j].k.p.offset = test_rand();
518 k[j].k.p.snapshot = U32_MAX;
521 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
522 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?:
523 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?:
524 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?:
525 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?:
526 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?:
527 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?:
528 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
529 __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
531 bch_err(c, "error in rand_insert_multi: %i", ret);
536 bch2_trans_exit(&trans);
/*
 * rand_lookup() - perf test: @nr point lookups at random offsets, reusing a
 * single iterator by repositioning it each time.
 * NOTE(review): elided view — @ret/@i/@k declarations, the error extraction
 * from the peeked key and the return are not visible here.
 */
540 static int rand_lookup(struct bch_fs *c, u64 nr)
542 struct btree_trans trans;
543 struct btree_iter *iter;
548 bch2_trans_init(&trans, c, 0, 0);
549 iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
551 for (i = 0; i < nr; i++) {
552 bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
554 k = bch2_btree_iter_peek(iter);
557 bch_err(c, "error in rand_lookup: %i", ret);
562 bch2_trans_iter_put(&trans, iter);
563 bch2_trans_exit(&trans);
/*
 * rand_mixed() - perf test: random lookups with an update mixed in on every
 * 4th iteration that found a key.
 * NOTE(review): elided view — @ret/@i/@k declarations and the return are not
 * visible here. The inner `struct bkey_i_cookie k` at original line 589
 * shadows the peeked bkey `k` — intentional-looking but worth a rename.
 */
567 static int rand_mixed(struct bch_fs *c, u64 nr)
569 struct btree_trans trans;
570 struct btree_iter *iter;
575 bch2_trans_init(&trans, c, 0, 0);
576 iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
578 for (i = 0; i < nr; i++) {
579 bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
581 k = bch2_btree_iter_peek(iter);
584 bch_err(c, "lookup error in rand_mixed: %i", ret);
/* every 4th hit: overwrite the found key in place */
588 if (!(i & 3) && k.k) {
589 struct bkey_i_cookie k;
591 bkey_cookie_init(&k.k_i);
594 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
595 bch2_btree_iter_traverse(iter) ?:
596 bch2_trans_update(&trans, iter, &k.k_i, 0));
598 bch_err(c, "update error in rand_mixed: %i", ret);
604 bch2_trans_iter_put(&trans, iter);
605 bch2_trans_exit(&trans);
/*
 * __do_delete() - transaction body for rand_delete(): peek the first key at
 * or after @pos and queue a whole-key deletion (zero-size key via bkey_init)
 * at that position.
 * NOTE(review): elided view — the no-key/error early exit between the peek
 * and the update, plus delete.k.p assignment, are not visible here.
 */
609 static int __do_delete(struct btree_trans *trans, struct bpos pos)
611 struct btree_iter *iter;
612 struct bkey_i delete;
616 iter = bch2_trans_get_iter(trans, BTREE_ID_xattrs, pos,
618 k = bch2_btree_iter_peek(iter);
626 bkey_init(&delete.k);
629 ret = bch2_trans_update(trans, iter, &delete, 0);
631 bch2_trans_iter_put(trans, iter);
/*
 * rand_delete() - perf test: @nr deletions at random positions, each run as
 * its own restartable transaction via __do_delete().
 * NOTE(review): elided view — @ret/@i declarations, the error break and the
 * return are not visible here.
 */
635 static int rand_delete(struct bch_fs *c, u64 nr)
637 struct btree_trans trans;
641 bch2_trans_init(&trans, c, 0, 0);
643 for (i = 0; i < nr; i++) {
644 struct bpos pos = POS(0, test_rand());
646 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
647 __do_delete(&trans, pos));
649 bch_err(c, "error in rand_delete: %i", ret);
654 bch2_trans_exit(&trans);
/*
 * seq_insert() - perf test: walk the xattrs btree by slots with an intent
 * iterator, inserting a cookie at each successive slot position.
 * NOTE(review): elided view — the loop's termination once @nr inserts are
 * done, @ret/@k declarations and the return are not visible here.
 */
658 static int seq_insert(struct bch_fs *c, u64 nr)
660 struct btree_trans trans;
661 struct btree_iter *iter;
663 struct bkey_i_cookie insert;
667 bkey_cookie_init(&insert.k_i);
669 bch2_trans_init(&trans, c, 0, 0);
671 for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
672 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
/* insert at whatever slot the iterator is currently on */
673 insert.k.p = iter->pos;
675 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
676 bch2_btree_iter_traverse(iter) ?:
677 bch2_trans_update(&trans, iter, &insert.k_i, 0));
679 bch_err(c, "error in seq_insert: %i", ret);
686 bch2_trans_iter_put(&trans, iter);
688 bch2_trans_exit(&trans);
/*
 * seq_lookup() - perf test: sequentially iterate every key in the xattrs
 * btree (loop body is empty — measures pure iteration cost).
 * NOTE(review): elided view — @ret/@k declarations and the return are not
 * visible here.
 */
692 static int seq_lookup(struct bch_fs *c, u64 nr)
694 struct btree_trans trans;
695 struct btree_iter *iter;
699 bch2_trans_init(&trans, c, 0, 0);
701 for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
703 bch2_trans_iter_put(&trans, iter);
705 bch2_trans_exit(&trans);
/*
 * seq_overwrite() - perf test: iterate every key with an intent iterator and
 * rewrite it in place with a copy of itself (reassembled into a cookie key).
 * NOTE(review): elided view — @ret/@k declarations and the return are not
 * visible here.
 */
709 static int seq_overwrite(struct bch_fs *c, u64 nr)
711 struct btree_trans trans;
712 struct btree_iter *iter;
716 bch2_trans_init(&trans, c, 0, 0);
718 for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
719 BTREE_ITER_INTENT, k, ret) {
720 struct bkey_i_cookie u;
/* copy the existing key so the update writes back identical contents */
722 bkey_reassemble(&u.k_i, k);
724 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
725 bch2_btree_iter_traverse(iter) ?:
726 bch2_trans_update(&trans, iter, &u.k_i, 0));
728 bch_err(c, "error in seq_overwrite: %i", ret);
732 bch2_trans_iter_put(&trans, iter);
734 bch2_trans_exit(&trans);
/*
 * seq_delete() - perf test: delete the whole xattrs test range in one
 * bch2_btree_delete_range() call.
 * NOTE(review): elided view — @ret declaration, the range flags argument and
 * the return are not visible here.
 */
738 static int seq_delete(struct bch_fs *c, u64 nr)
742 ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
743 POS(0, 0), POS(0, U64_MAX),
746 bch_err(c, "error in seq_delete: %i", ret);
/* Signature shared by every unit/perf test above: (fs, iteration count). */
750 typedef int (*perf_test_fn)(struct bch_fs *, u64);
/* Fields of struct test_job — its opening/other members are elided in this
 * view. ready_wait is the start-line rendezvous for worker threads;
 * done_completion is signalled by the last thread to finish. */
759 wait_queue_head_t ready_wait;
762 struct completion done_completion;
/*
 * btree_perf_test_thread() - worker body for bch2_btree_perf_test().
 * All threads decrement j->ready; the last one to arrive records the start
 * timestamp and wakes the rest, so the timed region begins only once every
 * thread is ready. Each thread runs its share (nr / nr_threads) of the work;
 * the last thread to finish records the end timestamp and completes the job.
 * NOTE(review): elided view — @ret declaration, error propagation into @j
 * and the return are not visible here.
 */
769 static int btree_perf_test_thread(void *data)
771 struct test_job *j = data;
774 if (atomic_dec_and_test(&j->ready)) {
775 wake_up(&j->ready_wait);
/* last thread in: stamp the start time for the whole job */
776 j->start = sched_clock();
778 wait_event(j->ready_wait, !atomic_read(&j->ready));
781 ret = j->fn(j->c, j->nr / j->nr_threads);
785 if (atomic_dec_and_test(&j->done)) {
/* last thread out: stamp the finish time and release the waiter */
786 j->finish = sched_clock();
787 complete(&j->done_completion);
/*
 * bch2_btree_perf_test() - entry point (from sysfs, presumably — caller not
 * visible here): look up @testname in the table below, run it on @nr_threads
 * kthreads with @nr total iterations split evenly, and print name, count,
 * wall time, per-iteration time and throughput.
 * NOTE(review): elided view — @i/@time declarations, the "unknown test"
 * return path, the human-readable time formatting arguments and the final
 * return are not visible here. Note @nr_threads counts *extra* kthreads:
 * this thread also runs btree_perf_test_thread() directly at line 844, but
 * ready/done are initialized to nr_threads — the off-by-one (if any) can't
 * be judged from this elided view.
 */
793 int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
794 u64 nr, unsigned nr_threads)
796 struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
797 char name_buf[20], nr_buf[20], per_sec_buf[20];
801 atomic_set(&j.ready, nr_threads);
802 init_waitqueue_head(&j.ready_wait);
804 atomic_set(&j.done, nr_threads);
805 init_completion(&j.done_completion);
/* map testname string -> function pointer; first match wins */
807 #define perf_test(_test) \
808 if (!strcmp(testname, #_test)) j.fn = _test
810 perf_test(rand_insert);
811 perf_test(rand_insert_multi);
812 perf_test(rand_lookup);
813 perf_test(rand_mixed);
814 perf_test(rand_delete);
816 perf_test(seq_insert);
817 perf_test(seq_lookup);
818 perf_test(seq_overwrite);
819 perf_test(seq_delete);
821 /* a unit test, not a perf test: */
822 perf_test(test_delete);
823 perf_test(test_delete_written);
824 perf_test(test_iterate);
825 perf_test(test_iterate_extents);
826 perf_test(test_iterate_slots);
827 perf_test(test_iterate_slots_extents);
828 perf_test(test_peek_end);
829 perf_test(test_peek_end_extents);
831 perf_test(test_extent_overwrite_front);
832 perf_test(test_extent_overwrite_back);
833 perf_test(test_extent_overwrite_middle);
834 perf_test(test_extent_overwrite_all);
837 pr_err("unknown test %s", testname);
841 //pr_info("running test %s:", testname);
/* this thread participates in the test itself */
844 btree_perf_test_thread(&j);
846 for (i = 0; i < nr_threads; i++)
847 kthread_run(btree_perf_test_thread, &j,
848 "bcachefs perf test[%u]", i);
/* retry the wait if interrupted by a signal */
850 while (wait_for_completion_interruptible(&j.done_completion))
853 time = j.finish - j.start;
855 scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
856 bch2_hprint(&PBUF(nr_buf), nr);
857 bch2_hprint(&PBUF(per_sec_buf), nr * NSEC_PER_SEC / time);
858 printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
859 name_buf, nr_buf, nr_threads,
861 time * nr_threads / nr,