1 #ifdef CONFIG_BCACHEFS_TESTS
4 #include "btree_update.h"
5 #include "journal_reclaim.h"
8 #include "linux/kthread.h"
9 #include "linux/random.h"
/*
 * delete_test_keys() - wipe the test keyspace so each test starts clean.
 *
 * Deletes POS(0, 0)..POS(0, U64_MAX) from both the EXTENTS and DIRENTS
 * btrees (the two btrees the tests below insert into).
 * NOTE(review): error-check lines after each delete are elided in this view.
 */
11 static void delete_test_keys(struct bch_fs *c)
15 	ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
16 				      POS(0, 0), POS(0, U64_MAX),
20 	ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
21 				      POS(0, 0), POS(0, U64_MAX),
/*
 * test_delete() - unit test: insert one cookie key, then delete at the same
 * iterator position twice.
 *
 * The second delete exercises deleting a key that was already deleted in the
 * same btree node (journal-pinned, not yet written).  @nr is unused; the
 * parameter exists so all tests share the perf_test_fn signature.
 */
28 static void test_delete(struct bch_fs *c, u64 nr)
30 	struct btree_iter iter;
31 	struct bkey_i_cookie k;
34 	bkey_cookie_init(&k.k_i);
36 	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
	/* Take node locks before inserting through this iterator: */
39 	ret = bch2_btree_iter_traverse(&iter);
42 	ret = bch2_btree_insert_at(c, NULL, NULL, 0,
43 				   BTREE_INSERT_ENTRY(&iter, &k.k_i));
46 	pr_info("deleting once");
47 	ret = bch2_btree_delete_at(&iter, 0);
50 	pr_info("deleting twice");
51 	ret = bch2_btree_delete_at(&iter, 0);
54 	bch2_btree_iter_unlock(&iter);
/*
 * test_delete_written() - unit test: delete a key after it has been flushed.
 *
 * Same shape as test_delete(), but flushes all journal pins (forcing the
 * btree node containing the key to be written out) before deleting — this
 * covers the delete path for keys that already hit disk.  @nr is unused.
 */
57 static void test_delete_written(struct bch_fs *c, u64 nr)
59 	struct btree_iter iter;
60 	struct bkey_i_cookie k;
63 	bkey_cookie_init(&k.k_i);
65 	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
68 	ret = bch2_btree_iter_traverse(&iter);
71 	ret = bch2_btree_insert_at(c, NULL, NULL, 0,
72 				   BTREE_INSERT_ENTRY(&iter, &k.k_i));
	/* Force the just-inserted key to be written out of the journal: */
75 	bch2_journal_flush_all_pins(&c->journal);
77 	ret = bch2_btree_delete_at(&iter, 0);
80 	bch2_btree_iter_unlock(&iter);
/*
 * test_iterate() - unit test: forward and backward iteration over @nr
 * sequentially-inserted DIRENTS keys.
 *
 * Inserts cookies at offsets 0..nr-1, then checks that forward iteration
 * visits exactly offsets 0,1,2,... and backward iteration visits them in
 * reverse.  NOTE(review): the init of i and the iterator setup for the
 * backward pass are elided in this view.
 */
83 static void test_iterate(struct bch_fs *c, u64 nr)
85 	struct btree_iter iter;
92 	pr_info("inserting test keys");
94 	for (i = 0; i < nr; i++) {
95 		struct bkey_i_cookie k;
97 		bkey_cookie_init(&k.k_i);
100 		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
105 	pr_info("iterating forwards");
	/* Every key must appear exactly once, in offset order: */
109 	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
110 		BUG_ON(k.k->p.offset != i++);
111 	bch2_btree_iter_unlock(&iter);
115 	pr_info("iterating backwards");
117 	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
118 		BUG_ON(k.k->p.offset != --i);
119 	bch2_btree_iter_unlock(&iter);
/*
 * test_iterate_extents() - unit test: forward/backward iteration over
 * contiguous extents.
 *
 * Inserts size-8 extents covering [0, nr) (each extent's end pos is i + 8),
 * then verifies forward iteration sees back-to-back extents with no gaps,
 * and backward iteration sees each extent's end where the previous one
 * started.
 */
124 static void test_iterate_extents(struct bch_fs *c, u64 nr)
126 	struct btree_iter iter;
133 	pr_info("inserting test extents");
135 	for (i = 0; i < nr; i += 8) {
136 		struct bkey_i_cookie k;
138 		bkey_cookie_init(&k.k_i);
		/* Extent keys are positioned at their end offset: */
139 		k.k.p.offset = i + 8;
142 		ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
147 	pr_info("iterating forwards");
	/* Each extent must start exactly where the previous one ended: */
151 	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
152 		BUG_ON(bkey_start_offset(k.k) != i);
155 	bch2_btree_iter_unlock(&iter);
159 	pr_info("iterating backwards");
161 	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
162 		BUG_ON(k.k->p.offset != i);
163 		i = bkey_start_offset(k.k);
165 	bch2_btree_iter_unlock(&iter);
/*
 * test_iterate_slots() - unit test: BTREE_ITER_SLOTS iteration over
 * non-extent keys.
 *
 * Inserts cookies at even offsets only (i * 2), then checks that normal
 * iteration skips the empty odd slots while slots iteration returns every
 * position, with bkey_deleted() true exactly on the odd (empty) offsets.
 */
170 static void test_iterate_slots(struct bch_fs *c, u64 nr)
172 	struct btree_iter iter;
179 	pr_info("inserting test keys");
181 	for (i = 0; i < nr; i++) {
182 		struct bkey_i_cookie k;
184 		bkey_cookie_init(&k.k_i);
		/* Only even offsets are populated; odd ones stay empty: */
185 		k.k.p.offset = i * 2;
187 		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
192 	pr_info("iterating forwards");
196 	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
197 		BUG_ON(k.k->p.offset != i);
200 	bch2_btree_iter_unlock(&iter);
204 	pr_info("iterating forwards by slots");
	/* Slots iteration yields a key for every offset, holes included: */
208 	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0),
209 			   BTREE_ITER_SLOTS, k) {
210 		BUG_ON(bkey_deleted(k.k) != (i & 1));
211 		BUG_ON(k.k->p.offset != i++);
216 	bch2_btree_iter_unlock(&iter);
/*
 * test_iterate_slots_extents() - unit test: BTREE_ITER_SLOTS iteration over
 * extents with holes.
 *
 * Inserts a size-8 extent in every 16-sector window (extent end at i + 16,
 * so each covers [i+8, i+16)), leaving an 8-sector hole before each one.
 * Normal iteration must return only the real extents; slots iteration must
 * also synthesize a deleted/hole extent for each gap.
 */
219 static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
221 	struct btree_iter iter;
228 	pr_info("inserting test keys");
230 	for (i = 0; i < nr; i += 16) {
231 		struct bkey_i_cookie k;
233 		bkey_cookie_init(&k.k_i);
		/* End pos i + 16 with size 8 => extent covers [i+8, i+16): */
234 		k.k.p.offset = i + 16;
237 		ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
242 	pr_info("iterating forwards");
246 	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
247 		BUG_ON(bkey_start_offset(k.k) != i + 8);
248 		BUG_ON(k.k->size != 8);
251 	bch2_btree_iter_unlock(&iter);
255 	pr_info("iterating forwards by slots");
	/* Alternating hole / real extent, each 8 sectors wide: */
259 	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0),
260 			   BTREE_ITER_SLOTS, k) {
261 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
263 		BUG_ON(bkey_start_offset(k.k) != i);
264 		BUG_ON(k.k->size != 8);
270 	bch2_btree_iter_unlock(&iter);
274 * XXX: we really want to make sure we've got a btree with depth > 0 for these
/*
 * test_peek_end() - unit test: peeking past the last key on an empty
 * DIRENTS btree.
 *
 * Peeks twice to verify the "no more keys" result is stable and doesn't
 * confuse iterator state.  NOTE(review): the BUG_ON(k.k) checks after each
 * peek are elided in this view.  @nr is unused.
 */
277 static void test_peek_end(struct bch_fs *c, u64 nr)
279 	struct btree_iter iter;
282 	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0);
284 	k = bch2_btree_iter_peek(&iter);
287 	k = bch2_btree_iter_peek(&iter);
290 	bch2_btree_iter_unlock(&iter);
/*
 * test_peek_end_extents() - same as test_peek_end(), but on the EXTENTS
 * btree (extents have a distinct iterator path).  @nr is unused.
 */
293 static void test_peek_end_extents(struct bch_fs *c, u64 nr)
295 	struct btree_iter iter;
298 	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
300 	k = bch2_btree_iter_peek(&iter);
303 	k = bch2_btree_iter_peek(&iter);
306 	bch2_btree_iter_unlock(&iter);
309 /* extent unit tests */
/*
 * insert_test_extent() - helper: insert a cookie extent covering
 * [start, end) with a monotonically increasing version.
 *
 * The increasing version number makes each insert distinguishable, so
 * overwrite tests can tell old from new data.
 * NOTE(review): parameter list (start, end) and the u64 test_version
 * counter are elided in this view.
 */
313 static void insert_test_extent(struct bch_fs *c,
316 	struct bkey_i_cookie k;
319 	//pr_info("inserting %llu-%llu v %llu", start, end, test_version);
321 	bkey_cookie_init(&k.k_i);
	/* Extent position is its end offset; size gives the start: */
322 	k.k_i.k.p.offset = end;
323 	k.k_i.k.size = end - start;
324 	k.k_i.k.version.lo = test_version++;
326 	ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
/*
 * __test_extent_overwrite() - helper: insert extent e1, then overwrite it
 * (fully or partially) with extent e2.
 *
 * The relative positions of [e1_start, e1_end) and [e2_start, e2_end)
 * chosen by the callers below exercise front/back/middle/full overwrite
 * paths in the extent insert code.
 */
331 static void __test_extent_overwrite(struct bch_fs *c,
332 				    u64 e1_start, u64 e1_end,
333 				    u64 e2_start, u64 e2_end)
335 	insert_test_extent(c, e1_start, e1_end);
336 	insert_test_extent(c, e2_start, e2_end);
/* Overwrite the front of an existing extent (both aligned and unaligned). */
341 static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
343 	__test_extent_overwrite(c, 0, 64, 0, 32);
344 	__test_extent_overwrite(c, 8, 64, 0, 32);
/* Overwrite the back of an existing extent (ending at and past its end). */
347 static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
349 	__test_extent_overwrite(c, 0, 64, 32, 64);
350 	__test_extent_overwrite(c, 0, 64, 32, 72);
/* Overwrite the middle of an existing extent, splitting it in two. */
353 static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
355 	__test_extent_overwrite(c, 0, 64, 32, 40);
/*
 * Completely cover an existing extent, with the new extent starting
 * before/at the old start and ending at/past the old end.
 */
358 static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
360 	__test_extent_overwrite(c, 32, 64, 0, 64);
361 	__test_extent_overwrite(c, 32, 64, 0, 128);
362 	__test_extent_overwrite(c, 32, 64, 32, 64);
363 	__test_extent_overwrite(c, 32, 64, 32, 128);
/*
 * test_rand() - return a pseudo-random u64 for randomized test key offsets.
 * NOTE(review): declaration of v and the return statement are elided in
 * this view.
 */
368 static u64 test_rand(void)
374 	prandom_bytes(&v, sizeof(v));
/*
 * rand_insert() - perf test: insert @nr cookie keys at random offsets into
 * the DIRENTS btree.
 */
379 static void rand_insert(struct bch_fs *c, u64 nr)
381 	struct bkey_i_cookie k;
385 	for (i = 0; i < nr; i++) {
386 		bkey_cookie_init(&k.k_i);
387 		k.k.p.offset = test_rand();
389 		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
/*
 * rand_lookup() - perf test: @nr point lookups at random offsets.
 *
 * Each iteration initializes a fresh iterator, peeks once, and unlocks;
 * the looked-up key itself is not checked (this measures lookup cost only).
 */
395 static void rand_lookup(struct bch_fs *c, u64 nr)
399 	for (i = 0; i < nr; i++) {
400 		struct btree_iter iter;
403 		bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
404 				     POS(0, test_rand()), 0);
406 		k = bch2_btree_iter_peek(&iter);
407 		bch2_btree_iter_unlock(&iter);
/*
 * rand_mixed() - perf test: random lookups with an insert mixed in every
 * 4th iteration (when the lookup found a key).
 *
 * The insert goes through the already-positioned iterator, overwriting the
 * key that was just looked up.
 * NOTE(review): the inner 'struct bkey_i_cookie k' shadows the outer
 * bkey_s_c 'k' used for the peek — intentional here, but worth renaming.
 */
411 static void rand_mixed(struct bch_fs *c, u64 nr)
416 	for (i = 0; i < nr; i++) {
417 		struct btree_iter iter;
420 		bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
421 				     POS(0, test_rand()), 0);
423 		k = bch2_btree_iter_peek(&iter);
425 		if (!(i & 3) && k.k) {
426 			struct bkey_i_cookie k;
428 			bkey_cookie_init(&k.k_i);
431 			ret = bch2_btree_insert_at(c, NULL, NULL, 0,
432 					BTREE_INSERT_ENTRY(&iter, &k.k_i));
436 		bch2_btree_iter_unlock(&iter);
/*
 * rand_delete() - perf test: delete at @nr random offsets by inserting
 * bare bkeys at those positions.
 * NOTE(review): the declaration/initialization of k (presumably set up as
 * a deleted/whiteout key) is elided in this view — confirm against the
 * full source.
 */
441 static void rand_delete(struct bch_fs *c, u64 nr)
447 	for (i = 0; i < nr; i++) {
449 		k.k.p.offset = test_rand();
451 		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
/*
 * seq_insert() - perf test: sequential inserts via a slots iterator.
 *
 * Walks every slot from POS_MIN with an intent lock and inserts a cookie
 * at each iterator position, i.e. fills the keyspace sequentially through
 * one iterator instead of re-seeking per key.
 * NOTE(review): the loop's termination after @nr inserts is in lines
 * elided from this view.
 */
457 static void seq_insert(struct bch_fs *c, u64 nr)
459 	struct btree_iter iter;
461 	struct bkey_i_cookie insert;
465 	bkey_cookie_init(&insert.k_i);
467 	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
468 			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
		/* Insert at wherever the slots iterator currently points: */
469 		insert.k.p = iter.pos;
471 		ret = bch2_btree_insert_at(c, NULL, NULL, 0,
472 				BTREE_INSERT_ENTRY(&iter, &insert.k_i));
478 	bch2_btree_iter_unlock(&iter);
/*
 * seq_lookup() - perf test: sequentially iterate every key in the DIRENTS
 * btree, discarding the results (measures pure iteration cost).
 */
481 static void seq_lookup(struct bch_fs *c, u64 nr)
483 	struct btree_iter iter;
486 	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
488 	bch2_btree_iter_unlock(&iter);
/*
 * seq_overwrite() - perf test: walk every existing key with an intent lock
 * and overwrite it in place.
 *
 * bkey_reassemble() copies the current key into u, which is then
 * re-inserted at the same position through the same iterator.
 */
491 static void seq_overwrite(struct bch_fs *c, u64 nr)
493 	struct btree_iter iter;
497 	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
498 			   BTREE_ITER_INTENT, k) {
499 		struct bkey_i_cookie u;
501 		bkey_reassemble(&u.k_i, k);
503 		ret = bch2_btree_insert_at(c, NULL, NULL, 0,
504 				BTREE_INSERT_ENTRY(&iter, &u.k_i));
507 	bch2_btree_iter_unlock(&iter);
/*
 * seq_delete() - perf test: delete the whole test keyspace with a single
 * ranged delete (POS(0, 0)..POS(0, U64_MAX) in DIRENTS).
 */
510 static void seq_delete(struct bch_fs *c, u64 nr)
514 	ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
515 				      POS(0, 0), POS(0, U64_MAX),
/* Common signature shared by all unit and perf tests above: (fs, nr). */
520 typedef void (*perf_test_fn)(struct bch_fs *, u64);
	/* Threads block here until the last one arrives (see 'ready'): */
529 	wait_queue_head_t		ready_wait;
	/* Signalled by the last thread to finish; main thread waits on it: */
532 	struct completion		done_completion;
/*
 * btree_perf_test_thread() - per-thread body of a perf test run.
 *
 * Barrier in: each thread decrements j->ready; the last one to arrive
 * records the start timestamp and wakes the rest.  Each thread then runs
 * its share (nr / nr_threads) of the work.  Barrier out: the last thread
 * to decrement j->done records the finish timestamp and completes
 * done_completion for the waiting caller.
 */
538 static int btree_perf_test_thread(void *data)
540 	struct test_job *j = data;
542 	if (atomic_dec_and_test(&j->ready)) {
543 		wake_up(&j->ready_wait);
544 		j->start = sched_clock();
	/* Everyone else waits for the last thread to open the gate: */
546 	wait_event(j->ready_wait, !atomic_read(&j->ready));
549 	j->fn(j->c, j->nr / j->nr_threads);
551 	if (atomic_dec_and_test(&j->done)) {
552 		j->finish = sched_clock();
553 		complete(&j->done_completion);
/*
 * bch2_btree_perf_test() - entry point: look up @testname, run it on
 * @nr_threads threads with @nr total iterations, and print timing.
 *
 * The test function is selected by string comparison against every
 * registered test via the perf_test() macro; unknown names are reported
 * with pr_err.  The calling thread participates as one of the workers
 * (it calls btree_perf_test_thread() directly), so only nr_threads - 1
 * additional kthreads would be needed.
 * NOTE(review): the loop below spawns nr_threads kthreads *in addition to*
 * the direct call — confirm against the full source whether the counters
 * are initialized to nr_threads + 1 in elided lines.
 */
559 void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
560 			  u64 nr, unsigned nr_threads)
562 	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
563 	char name_buf[20], nr_buf[20], per_sec_buf[20];
	/* Arm the start barrier and the finish notification: */
567 	atomic_set(&j.ready, nr_threads);
568 	init_waitqueue_head(&j.ready_wait);
570 	atomic_set(&j.done, nr_threads);
571 	init_completion(&j.done_completion);
	/* Map the test name string to its function pointer: */
573 #define perf_test(_test)				\
574 	if (!strcmp(testname, #_test)) j.fn = _test
576 	perf_test(rand_insert);
577 	perf_test(rand_lookup);
578 	perf_test(rand_mixed);
579 	perf_test(rand_delete);
581 	perf_test(seq_insert);
582 	perf_test(seq_lookup);
583 	perf_test(seq_overwrite);
584 	perf_test(seq_delete);
586 	/* a unit test, not a perf test: */
587 	perf_test(test_delete);
588 	perf_test(test_delete_written);
589 	perf_test(test_iterate);
590 	perf_test(test_iterate_extents);
591 	perf_test(test_iterate_slots);
592 	perf_test(test_iterate_slots_extents);
593 	perf_test(test_peek_end);
594 	perf_test(test_peek_end_extents);
596 	perf_test(test_extent_overwrite_front);
597 	perf_test(test_extent_overwrite_back);
598 	perf_test(test_extent_overwrite_middle);
599 	perf_test(test_extent_overwrite_all);
602 		pr_err("unknown test %s", testname);
606 	//pr_info("running test %s:", testname);
	/* The caller itself runs as one worker thread: */
609 		btree_perf_test_thread(&j);
611 		for (i = 0; i < nr_threads; i++)
612 			kthread_run(btree_perf_test_thread, &j,
613 				    "bcachefs perf test[%u]", i);
	/* Wait (restartably) for the last worker to complete: */
615 	while (wait_for_completion_interruptible(&j.done_completion))
618 	time = j.finish - j.start;
620 	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
621 	bch2_hprint(nr_buf, nr);
622 	bch2_hprint(per_sec_buf, nr * NSEC_PER_SEC / time);
623 	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
624 		name_buf, nr_buf, nr_threads,
626 		time * nr_threads / nr,
630 #endif /* CONFIG_BCACHEFS_TESTS */