1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
9 #include "linux/kthread.h"
10 #include "linux/random.h"
/*
 * Wipe all keys left behind by earlier test runs: deletes the whole
 * (inode 0, offset 0..U64_MAX) range from both the extents and dirents
 * btrees so each test starts from an empty tree.
 * NOTE(review): lines are elided in this view — the error checks between
 * the two delete_range calls are not visible here.
 */
12 static void delete_test_keys(struct bch_fs *c)
16 ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
17 POS(0, 0), POS(0, U64_MAX),
21 ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
22 POS(0, 0), POS(0, U64_MAX),
/*
 * Unit test: insert one cookie key into the dirents btree, then delete at
 * the same iterator position twice.  The second delete exercises deleting
 * a position that has already been deleted.
 * NOTE(review): interior lines (ret declaration, BUG_ON error checks,
 * iterator flags, closing brace) are elided from this view.
 */
29 static void test_delete(struct bch_fs *c, u64 nr)
31 struct btree_trans trans;
32 struct btree_iter *iter;
33 struct bkey_i_cookie k;
36 bkey_cookie_init(&k.k_i);
38 bch2_trans_init(&trans, c, 0, 0);
/* Iterator positioned at the (zero-initialized) key's pos */
40 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
43 ret = bch2_btree_iter_traverse(iter);
/* Insert the cookie, committing the transaction */
46 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
47 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
50 pr_info("deleting once");
51 ret = bch2_btree_delete_at(&trans, iter, 0);
/* Second delete at the same position must also succeed */
54 pr_info("deleting twice");
55 ret = bch2_btree_delete_at(&trans, iter, 0);
58 bch2_trans_exit(&trans);
/*
 * Unit test: like test_delete(), but flushes all journal pins after the
 * insert so the btree node containing the key is written out before the
 * delete — exercises deleting a key that has hit disk.
 * NOTE(review): interior lines (ret declaration, error checks, closing
 * brace) are elided from this view.
 */
61 static void test_delete_written(struct bch_fs *c, u64 nr)
63 struct btree_trans trans;
64 struct btree_iter *iter;
65 struct bkey_i_cookie k;
68 bkey_cookie_init(&k.k_i);
70 bch2_trans_init(&trans, c, 0, 0);
72 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
75 ret = bch2_btree_iter_traverse(iter);
78 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
79 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
/* Force the journalled insert out to the btree before deleting */
82 bch2_journal_flush_all_pins(&c->journal);
84 ret = bch2_btree_delete_at(&trans, iter, 0);
87 bch2_trans_exit(&trans);
/*
 * Unit test: insert @nr cookie keys into the dirents btree, then verify
 * forward iteration visits offsets in ascending order and backward
 * iteration (bch2_btree_iter_prev) visits them in descending order.
 * NOTE(review): elided lines presumably set k.k.p.offset = i per insert
 * and assert i == nr between the two passes — confirm against full file.
 */
90 static void test_iterate(struct bch_fs *c, u64 nr)
92 struct btree_trans trans;
93 struct btree_iter *iter;
98 bch2_trans_init(&trans, c, 0, 0);
102 pr_info("inserting test keys");
104 for (i = 0; i < nr; i++) {
105 struct bkey_i_cookie k;
107 bkey_cookie_init(&k.k_i);
110 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
115 pr_info("iterating forwards");
119 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
/* Forward pass: offsets must come back 0, 1, 2, ... */
121 BUG_ON(k.k->p.offset != i++);
125 pr_info("iterating backwards");
/* Backward pass: walk from the end until prev returns no key */
127 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k))
128 BUG_ON(k.k->p.offset != --i);
132 bch2_trans_exit(&trans);
/*
 * Unit test: insert non-overlapping 8-sector extents covering [i, i+8)
 * for i = 0, 8, 16, ... into the extents btree, then verify forward and
 * backward iteration see contiguous, correctly-ordered extents.
 * NOTE(review): elided lines presumably set k.k.size = 8 and advance i
 * by k.k->size in the forward pass — confirm against full file.
 */
135 static void test_iterate_extents(struct bch_fs *c, u64 nr)
137 struct btree_trans trans;
138 struct btree_iter *iter;
143 bch2_trans_init(&trans, c, 0, 0);
147 pr_info("inserting test extents");
149 for (i = 0; i < nr; i += 8) {
150 struct bkey_i_cookie k;
152 bkey_cookie_init(&k.k_i);
/* Extent end is i + 8; with size 8 it covers [i, i+8) */
153 k.k.p.offset = i + 8;
156 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
161 pr_info("iterating forwards");
165 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
166 POS_MIN, 0, k, ret) {
/* Each extent must start exactly where the previous one ended */
167 BUG_ON(bkey_start_offset(k.k) != i);
173 pr_info("iterating backwards");
175 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k)) {
/* Going backwards: this extent must end where the next one started */
176 BUG_ON(k.k->p.offset != i);
177 i = bkey_start_offset(k.k);
182 bch2_trans_exit(&trans);
/*
 * Unit test: insert cookie keys at even offsets (i * 2) only, then verify
 * (a) normal iteration skips the empty odd slots and (b) BTREE_ITER_SLOTS
 * iteration returns a deleted (whiteout) key for every empty odd slot.
 * NOTE(review): elided lines presumably advance i by 2 in the first pass
 * and bound the slots pass at 2*nr — confirm against full file.
 */
185 static void test_iterate_slots(struct bch_fs *c, u64 nr)
187 struct btree_trans trans;
188 struct btree_iter *iter;
193 bch2_trans_init(&trans, c, 0, 0);
197 pr_info("inserting test keys");
199 for (i = 0; i < nr; i++) {
200 struct bkey_i_cookie k;
202 bkey_cookie_init(&k.k_i);
/* Only even offsets populated; odd slots stay empty */
203 k.k.p.offset = i * 2;
205 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
210 pr_info("iterating forwards");
214 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
216 BUG_ON(k.k->p.offset != i);
219 bch2_trans_iter_free(&trans, iter);
223 pr_info("iterating forwards by slots");
227 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
228 BTREE_ITER_SLOTS, k, ret) {
/* Odd slots were never inserted, so they must read back as deleted */
229 BUG_ON(bkey_deleted(k.k) != (i & 1));
230 BUG_ON(k.k->p.offset != i++);
236 bch2_trans_exit(&trans);
/*
 * Unit test: insert 8-sector extents ending at i + 16 for i = 0, 16, ...
 * (so each 16-sector stride is half hole, half extent), then verify
 * normal iteration returns only the extents and BTREE_ITER_SLOTS
 * iteration also synthesizes deleted keys for the holes.
 * NOTE(review): elided lines presumably set k.k.size = 8 and advance i
 * between checks — confirm against full file.
 */
239 static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
241 struct btree_trans trans;
242 struct btree_iter *iter;
247 bch2_trans_init(&trans, c, 0, 0);
251 pr_info("inserting test keys");
253 for (i = 0; i < nr; i += 16) {
254 struct bkey_i_cookie k;
256 bkey_cookie_init(&k.k_i);
/* Extent ends at i + 16; with size 8 it covers [i+8, i+16) */
257 k.k.p.offset = i + 16;
260 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
265 pr_info("iterating forwards");
269 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
/* Each returned extent is the 8-sector second half of its stride */
271 BUG_ON(bkey_start_offset(k.k) != i + 8);
272 BUG_ON(k.k->size != 8);
275 bch2_trans_iter_free(&trans, iter);
279 pr_info("iterating forwards by slots");
283 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
284 BTREE_ITER_SLOTS, k, ret) {
/* Multiples of 16 fall in the hole half: must read as deleted */
285 BUG_ON(bkey_deleted(k.k) != !(i % 16));
287 BUG_ON(bkey_start_offset(k.k) != i);
288 BUG_ON(k.k->size != 8);
295 bch2_trans_exit(&trans);
299 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * Unit test: peek an empty dirents btree twice — both peeks must return
 * no key (the elided lines presumably BUG_ON(k.k) after each peek;
 * confirm against full file).  Peeking twice checks that a peek past the
 * end leaves the iterator in a state where peeking again is still safe.
 */
302 static void test_peek_end(struct bch_fs *c, u64 nr)
304 struct btree_trans trans;
305 struct btree_iter *iter;
308 bch2_trans_init(&trans, c, 0, 0);
310 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN, 0);
312 k = bch2_btree_iter_peek(iter);
315 k = bch2_btree_iter_peek(iter);
318 bch2_trans_exit(&trans);
/*
 * Unit test: same as test_peek_end() but against the extents btree —
 * double-peek on an empty tree must return no key both times.
 * NOTE(review): the BUG_ON checks after each peek are elided here.
 */
321 static void test_peek_end_extents(struct bch_fs *c, u64 nr)
323 struct btree_trans trans;
324 struct btree_iter *iter;
327 bch2_trans_init(&trans, c, 0, 0);
329 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
331 k = bch2_btree_iter_peek(iter);
334 k = bch2_btree_iter_peek(iter);
337 bch2_trans_exit(&trans);
340 /* extent unit tests */
/*
 * Helper for the extent-overwrite tests: insert a cookie extent covering
 * [start, end) into the extents btree, stamping it with a monotonically
 * increasing version (test_version is a file-scope counter, elided here)
 * so overwrite ordering is observable.
 * NOTE(review): the start/end parameters and the error check after the
 * insert are elided from this view.
 */
344 static void insert_test_extent(struct bch_fs *c,
347 struct bkey_i_cookie k;
350 //pr_info("inserting %llu-%llu v %llu", start, end, test_version);
352 bkey_cookie_init(&k.k_i);
/* p.offset is the extent's end; size = end - start gives start offset */
353 k.k_i.k.p.offset = end;
354 k.k_i.k.size = end - start;
355 k.k_i.k.version.lo = test_version++;
357 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
/*
 * Insert extent e1, then insert the (overlapping) extent e2 on top of it,
 * exercising the extent-overwrite path for the given overlap geometry.
 * NOTE(review): cleanup of the test keys after the two inserts is elided
 * from this view — confirm against full file.
 */
362 static void __test_extent_overwrite(struct bch_fs *c,
363 u64 e1_start, u64 e1_end,
364 u64 e2_start, u64 e2_end)
366 insert_test_extent(c, e1_start, e1_end);
367 insert_test_extent(c, e2_start, e2_end);
/* Overwrite the front of an existing extent (exact and partial overlap). */
372 static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
374 __test_extent_overwrite(c, 0, 64, 0, 32);
375 __test_extent_overwrite(c, 8, 64, 0, 32);
/* Overwrite the back of an existing extent (ending at and past its end). */
378 static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
380 __test_extent_overwrite(c, 0, 64, 32, 64);
381 __test_extent_overwrite(c, 0, 64, 32, 72);
/* Overwrite the middle of an extent, splitting it into two pieces. */
384 static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
386 __test_extent_overwrite(c, 0, 64, 32, 40);
/*
 * Completely cover an existing extent with a new one, for the four
 * combinations of the new extent starting before/at the old start and
 * ending at/past the old end.
 */
389 static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
391 __test_extent_overwrite(c, 32, 64, 0, 64);
392 __test_extent_overwrite(c, 32, 64, 0, 128);
393 __test_extent_overwrite(c, 32, 64, 32, 64);
394 __test_extent_overwrite(c, 32, 64, 32, 128);
/*
 * Return a pseudo-random u64 from prandom_bytes() — not cryptographically
 * secure, which is fine for test key offsets.
 * NOTE(review): the declaration of v and the return/reduction of the
 * random value are elided from this view.
 */
399 static u64 test_rand(void)
405 prandom_bytes(&v, sizeof(v));
/*
 * Perf test: insert @nr cookie keys at random offsets into the dirents
 * btree, one bch2_btree_insert() call per key.
 * NOTE(review): the error check after each insert is elided.
 */
410 static void rand_insert(struct bch_fs *c, u64 nr)
412 struct bkey_i_cookie k;
416 for (i = 0; i < nr; i++) {
417 bkey_cookie_init(&k.k_i);
418 k.k.p.offset = test_rand();
420 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
/*
 * Perf test: @nr point lookups at random offsets in the dirents btree.
 * Each iteration takes a fresh iterator, peeks once, and frees it.
 */
426 static void rand_lookup(struct bch_fs *c, u64 nr)
428 struct btree_trans trans;
429 struct btree_iter *iter;
433 bch2_trans_init(&trans, c, 0, 0);
435 for (i = 0; i < nr; i++) {
436 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
437 POS(0, test_rand()), 0);
439 k = bch2_btree_iter_peek(iter);
440 bch2_trans_iter_free(&trans, iter);
443 bch2_trans_exit(&trans);
/*
 * Perf test: mixed random workload — every iteration does a random point
 * lookup; every 4th iteration (i & 3 == 0) that finds a key also
 * overwrites it at the iterator's position via a transaction commit.
 * NOTE(review): the inner 'struct bkey_i_cookie k' shadows the outer
 * bkey_s_c 'k' from the peek — intentional but easy to misread.
 */
446 static void rand_mixed(struct bch_fs *c, u64 nr)
448 struct btree_trans trans;
449 struct btree_iter *iter;
454 bch2_trans_init(&trans, c, 0, 0);
456 for (i = 0; i < nr; i++) {
457 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
458 POS(0, test_rand()), 0);
460 k = bch2_btree_iter_peek(iter);
/* Roughly 1 in 4 lookups that hit a key also rewrite it */
462 if (!(i & 3) && k.k) {
463 struct bkey_i_cookie k;
465 bkey_cookie_init(&k.k_i);
468 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
469 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
473 bch2_trans_iter_free(&trans, iter);
476 bch2_trans_exit(&trans);
/*
 * Perf test: delete at @nr random offsets by inserting a key there —
 * presumably a whiteout/deleted key (the bkey init that makes k a
 * deletion, plus declarations and error checks, are elided from this
 * view — confirm against full file).
 */
479 static void rand_delete(struct bch_fs *c, u64 nr)
485 for (i = 0; i < nr; i++) {
487 k.k.p.offset = test_rand();
489 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
/*
 * Perf test: sequential insert — walk the dirents btree by slots with an
 * intent iterator and commit a cookie key into each slot at the
 * iterator's current position.
 * NOTE(review): the loop-termination check against nr and the error
 * handling after the commit are elided from this view.
 */
495 static void seq_insert(struct bch_fs *c, u64 nr)
497 struct btree_trans trans;
498 struct btree_iter *iter;
500 struct bkey_i_cookie insert;
504 bkey_cookie_init(&insert.k_i);
506 bch2_trans_init(&trans, c, 0, 0);
508 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
509 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
/* Reuse one insert key, repositioned to the current slot each pass */
510 insert.k.p = iter->pos;
512 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
513 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
519 bch2_trans_exit(&trans);
/*
 * Perf test: sequential scan — iterate every key in the dirents btree
 * doing no work per key, measuring pure iteration cost.
 */
522 static void seq_lookup(struct bch_fs *c, u64 nr)
524 struct btree_trans trans;
525 struct btree_iter *iter;
529 bch2_trans_init(&trans, c, 0, 0);
531 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret)
533 bch2_trans_exit(&trans);
/*
 * Perf test: sequential overwrite — iterate every key in the dirents
 * btree with an intent iterator, reassemble it into a writable copy, and
 * commit it back unchanged (a pure rewrite of existing keys).
 * NOTE(review): the error check after the commit is elided from this view.
 */
536 static void seq_overwrite(struct bch_fs *c, u64 nr)
538 struct btree_trans trans;
539 struct btree_iter *iter;
543 bch2_trans_init(&trans, c, 0, 0);
545 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
546 BTREE_ITER_INTENT, k, ret) {
547 struct bkey_i_cookie u;
/* Copy the read-only key into a writable bkey_i before updating */
549 bkey_reassemble(&u.k_i, k);
551 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
552 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
555 bch2_trans_exit(&trans);
/*
 * Perf test: delete everything in the dirents btree for inode 0 with a
 * single ranged delete, measuring bulk-delete cost.
 */
558 static void seq_delete(struct bch_fs *c, u64 nr)
562 ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
563 POS(0, 0), POS(0, U64_MAX),
/* Common signature for all tests above: (filesystem, iteration count). */
568 typedef void (*perf_test_fn)(struct bch_fs *, u64);
577 wait_queue_head_t ready_wait;
580 struct completion done_completion;
/*
 * Worker body for bch2_btree_perf_test(): all threads rendezvous on
 * j->ready — the last thread to decrement it records the start timestamp
 * and wakes the rest — then each runs its share (nr / nr_threads) of the
 * test, and the last thread to finish records the end timestamp and
 * signals done_completion.
 * NOTE(review): the return statement and surrounding braces are elided
 * from this view.
 */
586 static int btree_perf_test_thread(void *data)
588 struct test_job *j = data;
/* Last thread in: stamp the start time and release everyone */
590 if (atomic_dec_and_test(&j->ready)) {
591 wake_up(&j->ready_wait);
592 j->start = sched_clock();
/* Everyone else: wait until ready hits zero */
594 wait_event(j->ready_wait, !atomic_read(&j->ready));
597 j->fn(j->c, j->nr / j->nr_threads);
/* Last thread out: stamp the finish time and wake the waiter */
599 if (atomic_dec_and_test(&j->done)) {
600 j->finish = sched_clock();
601 complete(&j->done_completion);
/*
 * Entry point (called from sysfs, presumably — confirm against caller):
 * look up @testname among the registered unit/perf tests, run it across
 * @nr_threads kthreads splitting @nr iterations between them, then print
 * total time, ns per iteration, and iterations per second.
 * NOTE(review): lines are elided in this view — the "no test matched"
 * early return after the pr_err, and parts of the thread-launch logic,
 * are not fully visible.
 */
607 void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
608 u64 nr, unsigned nr_threads)
610 struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
611 char name_buf[20], nr_buf[20], per_sec_buf[20];
/* Both counters start at nr_threads; workers count them down */
615 atomic_set(&j.ready, nr_threads);
616 init_waitqueue_head(&j.ready_wait);
618 atomic_set(&j.done, nr_threads);
619 init_completion(&j.done_completion);
/* Map the test name string to its function pointer */
621 #define perf_test(_test) \
622 if (!strcmp(testname, #_test)) j.fn = _test
624 perf_test(rand_insert);
625 perf_test(rand_lookup);
626 perf_test(rand_mixed);
627 perf_test(rand_delete);
629 perf_test(seq_insert);
630 perf_test(seq_lookup);
631 perf_test(seq_overwrite);
632 perf_test(seq_delete);
634 /* a unit test, not a perf test: */
635 perf_test(test_delete);
636 perf_test(test_delete_written);
637 perf_test(test_iterate);
638 perf_test(test_iterate_extents);
639 perf_test(test_iterate_slots);
640 perf_test(test_iterate_slots_extents);
641 perf_test(test_peek_end);
642 perf_test(test_peek_end_extents);
644 perf_test(test_extent_overwrite_front);
645 perf_test(test_extent_overwrite_back);
646 perf_test(test_extent_overwrite_middle);
647 perf_test(test_extent_overwrite_all);
650 pr_err("unknown test %s", testname);
654 //pr_info("running test %s:", testname);
/* NOTE(review): elided lines likely gate this direct call vs. the
 * kthread loop below (e.g. on nr_threads) — confirm against full file */
657 btree_perf_test_thread(&j);
659 for (i = 0; i < nr_threads; i++)
660 kthread_run(btree_perf_test_thread, &j,
661 "bcachefs perf test[%u]", i);
/* Retry the wait if interrupted by a signal */
663 while (wait_for_completion_interruptible(&j.done_completion))
/* Elapsed ns for the whole job, stamped by first/last worker */
666 time = j.finish - j.start;
668 scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
669 bch2_hprint(&PBUF(nr_buf), nr);
670 bch2_hprint(&PBUF(per_sec_buf), nr * NSEC_PER_SEC / time);
671 printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
672 name_buf, nr_buf, nr_threads,
674 time * nr_threads / nr,