1 #ifdef CONFIG_BCACHEFS_TESTS
4 #include "btree_update.h"
5 #include "journal_reclaim.h"
8 #include "linux/kthread.h"
9 #include "linux/random.h"
/*
 * delete_test_keys() - wipe any keys left over from a previous test run.
 *
 * Deletes the whole inode-0 offset range [0, U64_MAX) from both btrees the
 * tests use (extents and dirents) so each test starts from an empty btree.
 * NOTE(review): this excerpt elides the ret declaration, the error checks
 * on ret, and the surrounding braces.
 */
11 static void delete_test_keys(struct bch_fs *c)
/* Clear the extents test btree. */
15 ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
16 POS(0, 0), POS(0, U64_MAX),
/* Clear the dirents test btree over the same range. */
20 ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
21 POS(0, 0), POS(0, U64_MAX),
/*
 * test_delete() - unit test: insert one key, then delete at the same
 * iterator position twice; the second delete (of an already-empty slot)
 * must also succeed.  NOTE(review): excerpt elides ret declaration,
 * BUG_ON()/error checks, iterator flags continuation and braces.
 */
28 static void test_delete(struct bch_fs *c, u64 nr)
30 struct btree_trans trans;
31 struct btree_iter *iter;
32 struct bkey_i_cookie k;
/* Cookie key at its default (zeroed) position. */
35 bkey_cookie_init(&k.k_i);
37 bch2_trans_init(&trans, c);
/* Iterator at the key's position; flags presumably on the elided
 * continuation line (likely INTENT) -- confirm against full source. */
39 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
42 ret = bch2_btree_iter_traverse(iter);
/* Insert the cookie through the transaction interface. */
45 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
46 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
/* First delete removes the key we just inserted. */
49 pr_info("deleting once");
50 ret = bch2_btree_delete_at(&trans, iter, 0);
/* Second delete at the same position must be safe as well. */
53 pr_info("deleting twice");
54 ret = bch2_btree_delete_at(&trans, iter, 0);
57 bch2_trans_exit(&trans);
/*
 * test_delete_written() - like test_delete(), but flush all journal pins
 * after the insert so the key has been written out of the journal before
 * we delete it; exercises deletion of an on-disk (written) key.
 * NOTE(review): excerpt elides ret declaration, error checks, iterator
 * flags continuation and braces.
 */
60 static void test_delete_written(struct bch_fs *c, u64 nr)
62 struct btree_trans trans;
63 struct btree_iter *iter;
64 struct bkey_i_cookie k;
67 bkey_cookie_init(&k.k_i);
69 bch2_trans_init(&trans, c);
71 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
74 ret = bch2_btree_iter_traverse(iter);
77 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
78 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
/* Force btree node writeback so the delete hits a written key. */
81 bch2_journal_flush_all_pins(&c->journal);
83 ret = bch2_btree_delete_at(&trans, iter, 0);
86 bch2_trans_exit(&trans);
/*
 * test_iterate() - insert nr cookie keys at consecutive offsets, then walk
 * them forwards and backwards, checking each offset is seen exactly once
 * and in order.  NOTE(review): excerpt elides i/ret/k declarations, the
 * k.k.p.offset = i assignment in the insert loop, the BUG_ON(i != nr)
 * checks between phases, and braces.
 */
89 static void test_iterate(struct bch_fs *c, u64 nr)
91 struct btree_trans trans;
92 struct btree_iter *iter;
97 bch2_trans_init(&trans, c);
101 pr_info("inserting test keys");
/* One cookie per offset 0..nr-1 (offset assignment elided here). */
103 for (i = 0; i < nr; i++) {
104 struct bkey_i_cookie k;
106 bkey_cookie_init(&k.k_i);
109 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
/* Forward walk: offsets must come back as 0,1,2,... */
114 pr_info("iterating forwards");
118 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
120 BUG_ON(k.k->p.offset != i++);
/* Backward walk from the end: offsets must count back down. */
124 pr_info("iterating backwards");
126 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k))
127 BUG_ON(k.k->p.offset != --i);
131 bch2_trans_exit(&trans);
/*
 * test_iterate_extents() - insert nr/8 contiguous 8-sector extents, then
 * iterate forwards and backwards verifying they tile the range with no
 * gaps or overlaps.  NOTE(review): excerpt elides i/ret/k declarations,
 * the k.k.size = 8 assignment, end-of-phase BUG_ON checks and braces.
 */
134 static void test_iterate_extents(struct bch_fs *c, u64 nr)
136 struct btree_trans trans;
137 struct btree_iter *iter;
142 bch2_trans_init(&trans, c);
146 pr_info("inserting test extents");
/* Extent covering [i, i+8): bkeys store the end pos, size elided here. */
148 for (i = 0; i < nr; i += 8) {
149 struct bkey_i_cookie k;
151 bkey_cookie_init(&k.k_i);
152 k.k.p.offset = i + 8;
155 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
/* Forward walk: each extent must start where the previous one ended. */
160 pr_info("iterating forwards");
164 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
165 POS_MIN, 0, k, ret) {
166 BUG_ON(bkey_start_offset(k.k) != i);
/* Backward walk: each extent must end where the next one starts. */
172 pr_info("iterating backwards");
174 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k)) {
175 BUG_ON(k.k->p.offset != i);
176 i = bkey_start_offset(k.k);
181 bch2_trans_exit(&trans);
/*
 * test_iterate_slots() - insert keys at even offsets only, then check that
 * a normal iteration sees just the even offsets while a BTREE_ITER_SLOTS
 * iteration also yields deleted (whiteout) keys for the odd-offset holes.
 * NOTE(review): excerpt elides i/ret/k declarations, end-of-phase checks,
 * the break once i reaves nr * 2, and braces.
 */
184 static void test_iterate_slots(struct bch_fs *c, u64 nr)
186 struct btree_trans trans;
187 struct btree_iter *iter;
192 bch2_trans_init(&trans, c);
196 pr_info("inserting test keys");
/* Keys at offsets 0, 2, 4, ... leaving odd-offset holes. */
198 for (i = 0; i < nr; i++) {
199 struct bkey_i_cookie k;
201 bkey_cookie_init(&k.k_i);
202 k.k.p.offset = i * 2;
204 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
/* Normal iteration: only the even offsets should appear. */
209 pr_info("iterating forwards");
213 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
215 BUG_ON(k.k->p.offset != i);
218 bch2_trans_iter_free(&trans, iter);
/* Slot iteration: every offset appears; odd slots are deleted keys. */
222 pr_info("iterating forwards by slots");
226 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
227 BTREE_ITER_SLOTS, k, ret) {
228 BUG_ON(bkey_deleted(k.k) != (i & 1));
229 BUG_ON(k.k->p.offset != i++);
235 bch2_trans_exit(&trans);
/*
 * test_iterate_slots_extents() - insert an 8-sector extent in every
 * 16-sector stride (covering [i+8, i+16)), then check that a normal
 * iteration returns just the live extents while a BTREE_ITER_SLOTS
 * iteration alternates hole (deleted) and live extents of size 8.
 * NOTE(review): excerpt elides i/ret/k declarations, k.k.size = 8,
 * loop-advance/termination logic between checks, and braces.
 */
238 static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
240 struct btree_trans trans;
241 struct btree_iter *iter;
246 bch2_trans_init(&trans, c);
250 pr_info("inserting test keys");
/* Extent ending at i+16 (start presumably i+8 via elided size = 8). */
252 for (i = 0; i < nr; i += 16) {
253 struct bkey_i_cookie k;
255 bkey_cookie_init(&k.k_i);
256 k.k.p.offset = i + 16;
259 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
/* Normal iteration: only the live [i+8, i+16) extents. */
264 pr_info("iterating forwards");
268 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
270 BUG_ON(bkey_start_offset(k.k) != i + 8);
271 BUG_ON(k.k->size != 8);
274 bch2_trans_iter_free(&trans, iter);
/* Slot iteration: holes come back as deleted extents of the same size. */
278 pr_info("iterating forwards by slots");
282 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
283 BTREE_ITER_SLOTS, k, ret) {
284 BUG_ON(bkey_deleted(k.k) != !(i % 16));
286 BUG_ON(bkey_start_offset(k.k) != i);
287 BUG_ON(k.k->size != 8);
294 bch2_trans_exit(&trans);
298  * XXX: we really want to make sure we've got a btree with depth > 0 for these tests
/*
 * test_peek_end() - peek twice on an empty dirents btree; both peeks must
 * return no key (the elided BUG_ON(k.k) checks assert this).
 * NOTE(review): excerpt elides the bkey_s_c k declaration, BUG_ON checks
 * and braces.
 */
301 static void test_peek_end(struct bch_fs *c, u64 nr)
303 struct btree_trans trans;
304 struct btree_iter *iter;
307 bch2_trans_init(&trans, c);
309 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN, 0);
/* First peek past the end... */
311 k = bch2_btree_iter_peek(iter);
/* ...and a second peek, which must behave identically. */
314 k = bch2_btree_iter_peek(iter);
317 bch2_trans_exit(&trans);
/*
 * test_peek_end_extents() - same as test_peek_end() but on the extents
 * btree: two consecutive peeks on an empty btree must both return no key.
 * NOTE(review): excerpt elides the bkey_s_c k declaration, BUG_ON checks
 * and braces.
 */
320 static void test_peek_end_extents(struct bch_fs *c, u64 nr)
322 struct btree_trans trans;
323 struct btree_iter *iter;
326 bch2_trans_init(&trans, c);
328 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
330 k = bch2_btree_iter_peek(iter);
333 k = bch2_btree_iter_peek(iter);
336 bch2_trans_exit(&trans);
339 /* extent unit tests */
/*
 * insert_test_extent() - insert one cookie extent covering [start, end)
 * into the extents btree.  Each insert gets a monotonically increasing
 * version so later (overwriting) extents win extent merge/overwrite logic.
 * NOTE(review): excerpt elides the start/end parameter lines, the ret
 * declaration/check, the file-scope test_version counter definition, and
 * braces.
 */
343 static void insert_test_extent(struct bch_fs *c,
346 struct bkey_i_cookie k;
349 //pr_info("inserting %llu-%llu v %llu", start, end, test_version);
351 bkey_cookie_init(&k.k_i);
/* bkeys store the end position; size gives the start implicitly. */
352 k.k_i.k.p.offset = end;
353 k.k_i.k.size = end - start;
/* Bump the version so overwrites are ordered. */
354 k.k_i.k.version.lo = test_version++;
356 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
/*
 * __test_extent_overwrite() - insert extent e1 then overlapping extent e2,
 * exercising the extent-overwrite path for the given overlap geometry.
 * NOTE(review): excerpt elides braces and the trailing cleanup
 * (presumably delete_test_keys()) -- confirm against full source.
 */
361 static void __test_extent_overwrite(struct bch_fs *c,
362 u64 e1_start, u64 e1_end,
363 u64 e2_start, u64 e2_end)
365 insert_test_extent(c, e1_start, e1_end);
366 insert_test_extent(c, e2_start, e2_end);
/*
 * test_extent_overwrite_front() - overwrite the front of an existing
 * extent: new extent starts at/before the old one and ends inside it.
 */
373 static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
375 __test_extent_overwrite(c, 0, 64, 0, 32);
374 __test_extent_overwrite(c, 8, 64, 0, 32);
/*
 * test_extent_overwrite_back() - overwrite the back of an existing extent:
 * new extent starts inside the old one and ends at or past its end.
 */
377 static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
379 __test_extent_overwrite(c, 0, 64, 32, 64);
380 __test_extent_overwrite(c, 0, 64, 32, 72);
/*
 * test_extent_overwrite_middle() - overwrite the middle of an existing
 * extent, splitting it into a front and a back fragment.
 */
383 static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
385 __test_extent_overwrite(c, 0, 64, 32, 40);
/*
 * test_extent_overwrite_all() - completely cover an existing extent, with
 * the new extent starting before/at and ending at/past the old one.
 */
388 static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
390 __test_extent_overwrite(c, 32, 64, 0, 64);
391 __test_extent_overwrite(c, 32, 64, 0, 128);
392 __test_extent_overwrite(c, 32, 64, 32, 64);
393 __test_extent_overwrite(c, 32, 64, 32, 128);
/*
 * test_rand() - return a pseudo-random u64 for randomized test keys.
 * NOTE(review): excerpt elides the u64 v declaration and the return
 * statement (presumably return v) -- confirm against full source.
 */
398 static u64 test_rand(void)
404 prandom_bytes(&v, sizeof(v));
/*
 * rand_insert() - perf test: insert nr cookie keys at random offsets into
 * the dirents btree.  NOTE(review): excerpt elides i/ret declarations,
 * the error check on ret and braces.
 */
409 static void rand_insert(struct bch_fs *c, u64 nr)
411 struct bkey_i_cookie k;
415 for (i = 0; i < nr; i++) {
416 bkey_cookie_init(&k.k_i);
/* Random key position per iteration. */
417 k.k.p.offset = test_rand();
419 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
/*
 * rand_lookup() - perf test: nr point lookups at random offsets.  The
 * result of each peek is discarded; only the lookup cost matters.
 * NOTE(review): excerpt elides i and bkey_s_c k declarations and braces.
 */
425 static void rand_lookup(struct bch_fs *c, u64 nr)
427 struct btree_trans trans;
428 struct btree_iter *iter;
432 bch2_trans_init(&trans, c);
434 for (i = 0; i < nr; i++) {
/* Fresh iterator at a random position each iteration. */
435 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
436 POS(0, test_rand()), 0);
438 k = bch2_btree_iter_peek(iter);
439 bch2_trans_iter_free(&trans, iter);
442 bch2_trans_exit(&trans);
/*
 * rand_mixed() - perf test: nr random lookups, and on every 4th iteration
 * that finds a key, overwrite it with a fresh cookie via a transaction
 * commit (roughly 3:1 read/write mix).  NOTE(review): excerpt elides
 * i/ret and outer bkey_s_c k declarations, error checks and braces.
 */
445 static void rand_mixed(struct bch_fs *c, u64 nr)
447 struct btree_trans trans;
448 struct btree_iter *iter;
453 bch2_trans_init(&trans, c);
455 for (i = 0; i < nr; i++) {
456 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
457 POS(0, test_rand()), 0);
459 k = bch2_btree_iter_peek(iter);
/* Every 4th hit gets overwritten. */
461 if (!(i & 3) && k.k) {
/* NB: this inner cookie k deliberately shadows the outer lookup k. */
462 struct bkey_i_cookie k;
464 bkey_cookie_init(&k.k_i);
467 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
468 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
472 bch2_trans_iter_free(&trans, iter);
475 bch2_trans_exit(&trans);
/*
 * rand_delete() - perf test: nr deletions at random offsets.
 * NOTE(review): excerpt elides the struct bkey_i k declaration and its
 * bkey_init() -- presumably k is a zero-type (deleted) key, so inserting
 * it acts as a delete/whiteout; confirm against full source.  Also elides
 * i/ret declarations, error check and braces.
 */
478 static void rand_delete(struct bch_fs *c, u64 nr)
484 for (i = 0; i < nr; i++) {
486 k.k.p.offset = test_rand();
488 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
/*
 * seq_insert() - perf test: walk empty slots sequentially with an intent
 * iterator and commit a cookie into each slot, stopping after nr inserts
 * (the termination check is elided in this excerpt).  NOTE(review):
 * excerpt also elides i/ret and bkey_s_c k declarations, the error check
 * on commit, and braces.
 */
494 static void seq_insert(struct bch_fs *c, u64 nr)
496 struct btree_trans trans;
497 struct btree_iter *iter;
499 struct bkey_i_cookie insert;
503 bkey_cookie_init(&insert.k_i);
505 bch2_trans_init(&trans, c);
/* SLOTS iteration visits every position, including empty ones. */
507 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
508 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
/* Place the cookie at the slot the iterator is currently on. */
509 insert.k.p = iter->pos;
511 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
512 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
518 bch2_trans_exit(&trans);
/*
 * seq_lookup() - perf test: sequentially iterate every key in the dirents
 * btree, discarding the results (pure iteration cost).  NOTE(review):
 * excerpt elides the bkey_s_c k declaration, the empty loop body and
 * braces.
 */
521 static void seq_lookup(struct bch_fs *c, u64 nr)
523 struct btree_trans trans;
524 struct btree_iter *iter;
527 bch2_trans_init(&trans, c);
529 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k)
531 bch2_trans_exit(&trans);
/*
 * seq_overwrite() - perf test: walk every existing key with an intent
 * iterator, reassemble it into a local copy, and commit it back in place
 * (overwrite with identical contents).  NOTE(review): excerpt elides the
 * ret and bkey_s_c k declarations, the error check on commit and braces.
 */
534 static void seq_overwrite(struct bch_fs *c, u64 nr)
536 struct btree_trans trans;
537 struct btree_iter *iter;
541 bch2_trans_init(&trans, c);
543 for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
544 BTREE_ITER_INTENT, k) {
545 struct bkey_i_cookie u;
/* Copy the current key so it can be re-inserted unchanged. */
547 bkey_reassemble(&u.k_i, k);
549 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
550 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
553 bch2_trans_exit(&trans);
/*
 * seq_delete() - perf test: delete the entire inode-0 range from the
 * dirents btree in one ranged delete.  NOTE(review): excerpt elides the
 * ret declaration, the error check, trailing arguments and braces.
 */
556 static void seq_delete(struct bch_fs *c, u64 nr)
560 ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
561 POS(0, 0), POS(0, U64_MAX),
/* Signature shared by every unit/perf test above: (fs, iteration count). */
566 typedef void (*perf_test_fn)(struct bch_fs *, u64);
575 wait_queue_head_t ready_wait;
578 struct completion done_completion;
/*
 * btree_perf_test_thread() - worker body for bch2_btree_perf_test().
 *
 * All workers decrement j->ready; the last one to arrive records the start
 * timestamp and wakes the rest, so the clock starts only once every thread
 * is ready.  Each worker then runs its 1/nr_threads share of the work; the
 * last worker to finish records the end timestamp and signals completion.
 * NOTE(review): excerpt elides the return statement and braces.
 */
584 static int btree_perf_test_thread(void *data)
586 struct test_job *j = data;
/* Last thread to become ready starts the clock and releases everyone. */
588 if (atomic_dec_and_test(&j->ready)) {
589 wake_up(&j->ready_wait);
590 j->start = sched_clock();
/* Everyone else waits here until ready hits zero. */
592 wait_event(j->ready_wait, !atomic_read(&j->ready));
/* Run this thread's share of the total iteration count. */
595 j->fn(j->c, j->nr / j->nr_threads);
/* Last thread to finish stops the clock and signals the waiter. */
597 if (atomic_dec_and_test(&j->done)) {
598 j->finish = sched_clock();
599 complete(&j->done_completion);
/*
 * bch2_btree_perf_test() - entry point: look up @testname, run it with
 * @nr iterations spread across @nr_threads kthreads, and print elapsed
 * time, per-iteration nsec and throughput.
 * NOTE(review): excerpt elides i/time declarations, the early return for
 * unknown tests, some scnprintf/printk continuation lines and braces.
 */
605 void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
606 u64 nr, unsigned nr_threads)
608 struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
609 char name_buf[20], nr_buf[20], per_sec_buf[20];
/* Barrier state: all threads must check in before timing starts. */
613 atomic_set(&j.ready, nr_threads);
614 init_waitqueue_head(&j.ready_wait);
/* And all must check out before timing stops. */
616 atomic_set(&j.done, nr_threads);
617 init_completion(&j.done_completion);
/* Map the test name string onto its function pointer. */
619 #define perf_test(_test) \
620 if (!strcmp(testname, #_test)) j.fn = _test
622 perf_test(rand_insert);
623 perf_test(rand_lookup);
624 perf_test(rand_mixed);
625 perf_test(rand_delete);
627 perf_test(seq_insert);
628 perf_test(seq_lookup);
629 perf_test(seq_overwrite);
630 perf_test(seq_delete);
632 /* a unit test, not a perf test: */
633 perf_test(test_delete);
634 perf_test(test_delete_written);
635 perf_test(test_iterate);
636 perf_test(test_iterate_extents);
637 perf_test(test_iterate_slots);
638 perf_test(test_iterate_slots_extents);
639 perf_test(test_peek_end);
640 perf_test(test_peek_end_extents);
642 perf_test(test_extent_overwrite_front);
643 perf_test(test_extent_overwrite_back);
644 perf_test(test_extent_overwrite_middle);
645 perf_test(test_extent_overwrite_all);
/* No test matched; the early return after this is elided here. */
648 pr_err("unknown test %s", testname);
652 //pr_info("running test %s:", testname);
655 btree_perf_test_thread(&j);
/* Spawn the workers; they synchronize on j.ready before starting. */
657 for (i = 0; i < nr_threads; i++)
658 kthread_run(btree_perf_test_thread, &j,
659 "bcachefs perf test[%u]", i);
/* Wait (restarting on signals) for the last worker to finish. */
661 while (wait_for_completion_interruptible(&j.done_completion))
/* Wall-clock nanoseconds between barrier release and last finisher. */
664 time = j.finish - j.start;
666 scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
667 bch2_hprint(&PBUF(nr_buf), nr);
668 bch2_hprint(&PBUF(per_sec_buf), nr * NSEC_PER_SEC / time);
669 printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
670 name_buf, nr_buf, nr_threads,
/* nsec/iter scales time by nr_threads since work was split across them. */
672 time * nr_threads / nr,