#ifdef CONFIG_BCACHEFS_TESTS

#include "btree_update.h"
#include "journal_reclaim.h"

#include "linux/kthread.h"
#include "linux/random.h"
11 static void delete_test_keys(struct bch_fs *c)
15 ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
16 POS(0, 0), POS(0, U64_MAX),
20 ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
21 POS(0, 0), POS(0, U64_MAX),
28 static void test_delete(struct bch_fs *c, u64 nr)
30 struct btree_trans trans;
31 struct btree_iter *iter;
32 struct bkey_i_cookie k;
35 bkey_cookie_init(&k.k_i);
37 bch2_trans_init(&trans, c);
39 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
42 ret = bch2_btree_iter_traverse(iter);
45 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
46 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
49 pr_info("deleting once");
50 ret = bch2_btree_delete_at(&trans, iter, 0);
53 pr_info("deleting twice");
54 ret = bch2_btree_delete_at(&trans, iter, 0);
57 bch2_trans_exit(&trans);
60 static void test_delete_written(struct bch_fs *c, u64 nr)
62 struct btree_trans trans;
63 struct btree_iter *iter;
64 struct bkey_i_cookie k;
67 bkey_cookie_init(&k.k_i);
69 bch2_trans_init(&trans, c);
71 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
74 ret = bch2_btree_iter_traverse(iter);
77 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
78 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
81 bch2_journal_flush_all_pins(&c->journal);
83 ret = bch2_btree_delete_at(&trans, iter, 0);
86 bch2_trans_exit(&trans);
89 static void test_iterate(struct bch_fs *c, u64 nr)
91 struct btree_iter iter;
98 pr_info("inserting test keys");
100 for (i = 0; i < nr; i++) {
101 struct bkey_i_cookie k;
103 bkey_cookie_init(&k.k_i);
106 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
111 pr_info("iterating forwards");
115 for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
116 BUG_ON(k.k->p.offset != i++);
117 bch2_btree_iter_unlock(&iter);
121 pr_info("iterating backwards");
123 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
124 BUG_ON(k.k->p.offset != --i);
125 bch2_btree_iter_unlock(&iter);
130 static void test_iterate_extents(struct bch_fs *c, u64 nr)
132 struct btree_iter iter;
139 pr_info("inserting test extents");
141 for (i = 0; i < nr; i += 8) {
142 struct bkey_i_cookie k;
144 bkey_cookie_init(&k.k_i);
145 k.k.p.offset = i + 8;
148 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
153 pr_info("iterating forwards");
157 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
158 BUG_ON(bkey_start_offset(k.k) != i);
161 bch2_btree_iter_unlock(&iter);
165 pr_info("iterating backwards");
167 while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
168 BUG_ON(k.k->p.offset != i);
169 i = bkey_start_offset(k.k);
171 bch2_btree_iter_unlock(&iter);
176 static void test_iterate_slots(struct bch_fs *c, u64 nr)
178 struct btree_iter iter;
185 pr_info("inserting test keys");
187 for (i = 0; i < nr; i++) {
188 struct bkey_i_cookie k;
190 bkey_cookie_init(&k.k_i);
191 k.k.p.offset = i * 2;
193 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
198 pr_info("iterating forwards");
202 for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
203 BUG_ON(k.k->p.offset != i);
206 bch2_btree_iter_unlock(&iter);
210 pr_info("iterating forwards by slots");
214 for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0),
215 BTREE_ITER_SLOTS, k) {
216 BUG_ON(bkey_deleted(k.k) != (i & 1));
217 BUG_ON(k.k->p.offset != i++);
222 bch2_btree_iter_unlock(&iter);
225 static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
227 struct btree_iter iter;
234 pr_info("inserting test keys");
236 for (i = 0; i < nr; i += 16) {
237 struct bkey_i_cookie k;
239 bkey_cookie_init(&k.k_i);
240 k.k.p.offset = i + 16;
243 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
248 pr_info("iterating forwards");
252 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
253 BUG_ON(bkey_start_offset(k.k) != i + 8);
254 BUG_ON(k.k->size != 8);
257 bch2_btree_iter_unlock(&iter);
261 pr_info("iterating forwards by slots");
265 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0),
266 BTREE_ITER_SLOTS, k) {
267 BUG_ON(bkey_deleted(k.k) != !(i % 16));
269 BUG_ON(bkey_start_offset(k.k) != i);
270 BUG_ON(k.k->size != 8);
276 bch2_btree_iter_unlock(&iter);
/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
283 static void test_peek_end(struct bch_fs *c, u64 nr)
285 struct btree_iter iter;
288 bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0);
290 k = bch2_btree_iter_peek(&iter);
293 k = bch2_btree_iter_peek(&iter);
296 bch2_btree_iter_unlock(&iter);
299 static void test_peek_end_extents(struct bch_fs *c, u64 nr)
301 struct btree_iter iter;
304 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
306 k = bch2_btree_iter_peek(&iter);
309 k = bch2_btree_iter_peek(&iter);
312 bch2_btree_iter_unlock(&iter);
/* extent unit tests */
319 static void insert_test_extent(struct bch_fs *c,
322 struct bkey_i_cookie k;
325 //pr_info("inserting %llu-%llu v %llu", start, end, test_version);
327 bkey_cookie_init(&k.k_i);
328 k.k_i.k.p.offset = end;
329 k.k_i.k.size = end - start;
330 k.k_i.k.version.lo = test_version++;
332 ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
337 static void __test_extent_overwrite(struct bch_fs *c,
338 u64 e1_start, u64 e1_end,
339 u64 e2_start, u64 e2_end)
341 insert_test_extent(c, e1_start, e1_end);
342 insert_test_extent(c, e2_start, e2_end);
347 static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
349 __test_extent_overwrite(c, 0, 64, 0, 32);
350 __test_extent_overwrite(c, 8, 64, 0, 32);
353 static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
355 __test_extent_overwrite(c, 0, 64, 32, 64);
356 __test_extent_overwrite(c, 0, 64, 32, 72);
359 static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
361 __test_extent_overwrite(c, 0, 64, 32, 40);
364 static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
366 __test_extent_overwrite(c, 32, 64, 0, 64);
367 __test_extent_overwrite(c, 32, 64, 0, 128);
368 __test_extent_overwrite(c, 32, 64, 32, 64);
369 __test_extent_overwrite(c, 32, 64, 32, 128);
374 static u64 test_rand(void)
380 prandom_bytes(&v, sizeof(v));
385 static void rand_insert(struct bch_fs *c, u64 nr)
387 struct bkey_i_cookie k;
391 for (i = 0; i < nr; i++) {
392 bkey_cookie_init(&k.k_i);
393 k.k.p.offset = test_rand();
395 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
401 static void rand_lookup(struct bch_fs *c, u64 nr)
405 for (i = 0; i < nr; i++) {
406 struct btree_iter iter;
409 bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
410 POS(0, test_rand()), 0);
412 k = bch2_btree_iter_peek(&iter);
413 bch2_btree_iter_unlock(&iter);
417 static void rand_mixed(struct bch_fs *c, u64 nr)
422 for (i = 0; i < nr; i++) {
423 struct btree_trans trans;
424 struct btree_iter *iter;
427 bch2_trans_init(&trans, c);
429 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
430 POS(0, test_rand()), 0);
432 k = bch2_btree_iter_peek(iter);
434 if (!(i & 3) && k.k) {
435 struct bkey_i_cookie k;
437 bkey_cookie_init(&k.k_i);
440 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
441 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
445 bch2_trans_exit(&trans);
450 static void rand_delete(struct bch_fs *c, u64 nr)
456 for (i = 0; i < nr; i++) {
458 k.k.p.offset = test_rand();
460 ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
466 static void seq_insert(struct bch_fs *c, u64 nr)
468 struct btree_trans trans;
469 struct btree_iter *iter;
471 struct bkey_i_cookie insert;
475 bkey_cookie_init(&insert.k_i);
477 bch2_trans_init(&trans, c);
479 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
480 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
482 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
483 insert.k.p = iter->pos;
485 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
486 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
492 bch2_trans_exit(&trans);
495 static void seq_lookup(struct bch_fs *c, u64 nr)
497 struct btree_iter iter;
500 for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
502 bch2_btree_iter_unlock(&iter);
505 static void seq_overwrite(struct bch_fs *c, u64 nr)
507 struct btree_trans trans;
508 struct btree_iter *iter;
512 bch2_trans_init(&trans, c);
514 iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
517 for_each_btree_key_continue(iter, 0, k) {
518 struct bkey_i_cookie u;
520 bkey_reassemble(&u.k_i, k);
522 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
523 ret = bch2_trans_commit(&trans, NULL, NULL, 0);
526 bch2_trans_exit(&trans);
529 static void seq_delete(struct bch_fs *c, u64 nr)
533 ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
534 POS(0, 0), POS(0, U64_MAX),
539 typedef void (*perf_test_fn)(struct bch_fs *, u64);
548 wait_queue_head_t ready_wait;
551 struct completion done_completion;
557 static int btree_perf_test_thread(void *data)
559 struct test_job *j = data;
561 if (atomic_dec_and_test(&j->ready)) {
562 wake_up(&j->ready_wait);
563 j->start = sched_clock();
565 wait_event(j->ready_wait, !atomic_read(&j->ready));
568 j->fn(j->c, j->nr / j->nr_threads);
570 if (atomic_dec_and_test(&j->done)) {
571 j->finish = sched_clock();
572 complete(&j->done_completion);
578 void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
579 u64 nr, unsigned nr_threads)
581 struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
582 char name_buf[20], nr_buf[20], per_sec_buf[20];
586 atomic_set(&j.ready, nr_threads);
587 init_waitqueue_head(&j.ready_wait);
589 atomic_set(&j.done, nr_threads);
590 init_completion(&j.done_completion);
592 #define perf_test(_test) \
593 if (!strcmp(testname, #_test)) j.fn = _test
595 perf_test(rand_insert);
596 perf_test(rand_lookup);
597 perf_test(rand_mixed);
598 perf_test(rand_delete);
600 perf_test(seq_insert);
601 perf_test(seq_lookup);
602 perf_test(seq_overwrite);
603 perf_test(seq_delete);
605 /* a unit test, not a perf test: */
606 perf_test(test_delete);
607 perf_test(test_delete_written);
608 perf_test(test_iterate);
609 perf_test(test_iterate_extents);
610 perf_test(test_iterate_slots);
611 perf_test(test_iterate_slots_extents);
612 perf_test(test_peek_end);
613 perf_test(test_peek_end_extents);
615 perf_test(test_extent_overwrite_front);
616 perf_test(test_extent_overwrite_back);
617 perf_test(test_extent_overwrite_middle);
618 perf_test(test_extent_overwrite_all);
621 pr_err("unknown test %s", testname);
625 //pr_info("running test %s:", testname);
628 btree_perf_test_thread(&j);
630 for (i = 0; i < nr_threads; i++)
631 kthread_run(btree_perf_test_thread, &j,
632 "bcachefs perf test[%u]", i);
634 while (wait_for_completion_interruptible(&j.done_completion))
637 time = j.finish - j.start;
639 scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
640 bch2_hprint(&PBUF(nr_buf), nr);
641 bch2_hprint(&PBUF(per_sec_buf), nr * NSEC_PER_SEC / time);
642 printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
643 name_buf, nr_buf, nr_threads,
645 time * nr_threads / nr,
#endif /* CONFIG_BCACHEFS_TESTS */