#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"
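/*
 * Unit tests and performance tests for the btree key/iterator code.  Each
 * test takes a filesystem and an iteration count; tests are selected by name
 * and run from bch2_btree_perf_test() below, and failures are reported with
 * BUG_ON().
 */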
static void delete_test_keys(struct bch_fs *c)
{
	int ret;

	ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
				      POS(0, 0), POS(0, U64_MAX),
				      NULL);
	BUG_ON(ret);

	ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
				      POS(0, 0), POS(0, U64_MAX),
				      NULL);
	BUG_ON(ret);
}
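/* unit tests */

/*
 * test_delete: insert a single cookie key through an intent iterator, then
 * delete at the same position twice, checking that a second delete of an
 * already-empty slot through the same iterator succeeds.
 */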
static void test_delete(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);

	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
			     BTREE_ITER_INTENT);

	ret = bch2_btree_iter_traverse(&iter);
	BUG_ON(ret);

	ret = bch2_btree_insert_at(c, NULL, NULL, 0,
				   BTREE_INSERT_ENTRY(&iter, &k.k_i));
	BUG_ON(ret);

	pr_info("deleting once");
	ret = bch2_btree_delete_at(&iter, 0);
	BUG_ON(ret);

	pr_info("deleting twice");
	ret = bch2_btree_delete_at(&iter, 0);
	BUG_ON(ret);

	bch2_btree_iter_unlock(&iter);
}
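/*
 * test_delete_written: like test_delete, but flush all journal pins after the
 * insert so the key has been written into the btree before it is deleted.
 */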
static void test_delete_written(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);

	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
			     BTREE_ITER_INTENT);

	ret = bch2_btree_iter_traverse(&iter);
	BUG_ON(ret);

	ret = bch2_btree_insert_at(c, NULL, NULL, 0,
				   BTREE_INSERT_ENTRY(&iter, &k.k_i));
	BUG_ON(ret);

	bch2_journal_flush_all_pins(&c->journal);

	ret = bch2_btree_delete_at(&iter, 0);
	BUG_ON(ret);

	bch2_btree_iter_unlock(&iter);
}
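/*
 * test_iterate: insert nr cookie keys at consecutive offsets, then check that
 * forward iteration (for_each_btree_key) and backward iteration
 * (bch2_btree_iter_prev) each visit every offset exactly once.
 */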
static void test_iterate(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 i;
	int ret;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie k;

		bkey_cookie_init(&k.k_i);
		k.k.p.offset = i;

		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
					NULL, NULL, 0);
		BUG_ON(ret);
	}

	pr_info("iterating forwards");

	i = 0;

	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
		BUG_ON(k.k->p.offset != i++);
	bch2_btree_iter_unlock(&iter);

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
		BUG_ON(k.k->p.offset != --i);
	bch2_btree_iter_unlock(&iter);

	BUG_ON(i);
}
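/*
 * test_iterate_extents: extent version of test_iterate: insert back-to-back
 * 8-sector extents and verify that forward and backward iteration return them
 * contiguously, with matching start and end offsets.
 */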
static void test_iterate_extents(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 i;
	int ret;

	delete_test_keys(c);

	pr_info("inserting test extents");

	for (i = 0; i < nr; i += 8) {
		struct bkey_i_cookie k;

		bkey_cookie_init(&k.k_i);
		k.k.p.offset = i + 8;
		k.k.size = 8;

		ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
					NULL, NULL, 0);
		BUG_ON(ret);
	}

	pr_info("iterating forwards");

	i = 0;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
		BUG_ON(bkey_start_offset(k.k) != i);
		i = k.k->p.offset;
	}
	bch2_btree_iter_unlock(&iter);

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
		BUG_ON(k.k->p.offset != i);
		i = bkey_start_offset(k.k);
	}
	bch2_btree_iter_unlock(&iter);

	BUG_ON(i);
}
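/*
 * test_iterate_slots: insert keys at every second offset, then iterate twice:
 * once over existing keys only, and once with BTREE_ITER_SLOTS, which must
 * also return a deleted key for each empty slot in between.
 */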
static void test_iterate_slots(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 i;
	int ret;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie k;

		bkey_cookie_init(&k.k_i);
		k.k.p.offset = i * 2;

		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
					NULL, NULL, 0);
		BUG_ON(ret);
	}

	pr_info("iterating forwards");

	i = 0;

	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
		BUG_ON(k.k->p.offset != i);
		i += 2;
	}
	bch2_btree_iter_unlock(&iter);

	BUG_ON(i != nr * 2);

	pr_info("iterating forwards by slots");

	i = 0;

	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0),
			   BTREE_ITER_SLOTS, k) {
		BUG_ON(bkey_deleted(k.k) != (i & 1));
		BUG_ON(k.k->p.offset != i++);

		if (i == nr * 2)
			break;
	}
	bch2_btree_iter_unlock(&iter);
}
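/*
 * test_iterate_slots_extents: as above, but for extents: 8-sector extents
 * separated by 8-sector holes, where BTREE_ITER_SLOTS iteration must return
 * the holes as deleted extents of the same size.
 */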
static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 i;
	int ret;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i += 16) {
		struct bkey_i_cookie k;

		bkey_cookie_init(&k.k_i);
		k.k.p.offset = i + 16;
		k.k.size = 8;

		ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
					NULL, NULL, 0);
		BUG_ON(ret);
	}

	pr_info("iterating forwards");

	i = 0;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
		BUG_ON(bkey_start_offset(k.k) != i + 8);
		BUG_ON(k.k->size != 8);
		i += 16;
	}
	bch2_btree_iter_unlock(&iter);

	BUG_ON(i != nr);

	pr_info("iterating forwards by slots");

	i = 0;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0),
			   BTREE_ITER_SLOTS, k) {
		BUG_ON(bkey_deleted(k.k) != !(i % 16));

		BUG_ON(bkey_start_offset(k.k) != i);
		BUG_ON(k.k->size != 8);
		i = k.k->p.offset;
	}
	bch2_btree_iter_unlock(&iter);
}
/* extent unit tests */
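/*
 * The overwrite tests insert two overlapping extents and rely on the extent
 * update path to trim or split the older one; insert_test_extent() bumps the
 * version on every insert so the second extent is seen as newer.
 */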
static u64 test_version;

static void insert_test_extent(struct bch_fs *c,
			       u64 start, u64 end)
{
	struct bkey_i_cookie k;
	int ret;

	//pr_info("inserting %llu-%llu v %llu", start, end, test_version);

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.offset = end;
	k.k_i.k.size = end - start;
	k.k_i.k.version.lo = test_version++;

	ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
				NULL, NULL, 0);
	BUG_ON(ret);
}
static void __test_extent_overwrite(struct bch_fs *c,
				    u64 e1_start, u64 e1_end,
				    u64 e2_start, u64 e2_end)
{
	insert_test_extent(c, e1_start, e1_end);
	insert_test_extent(c, e2_start, e2_end);

	delete_test_keys(c);
}
static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
	__test_extent_overwrite(c, 0, 64, 0, 32);
	__test_extent_overwrite(c, 8, 64, 0, 32);
}
static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
	__test_extent_overwrite(c, 0, 64, 32, 64);
	__test_extent_overwrite(c, 0, 64, 32, 72);
}
static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
	__test_extent_overwrite(c, 0, 64, 32, 40);
}
static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
	__test_extent_overwrite(c, 32, 64, 0, 64);
	__test_extent_overwrite(c, 32, 64, 0, 128);
	__test_extent_overwrite(c, 32, 64, 32, 64);
	__test_extent_overwrite(c, 32, 64, 32, 128);
}
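/* perf tests */

/*
 * The rand_* and seq_* tests below are performance tests: each performs nr
 * random or sequential operations (inserts, lookups, overwrites, deletes) and
 * is timed by the harness in bch2_btree_perf_test().
 */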
static u64 test_rand(void)
{
	u64 v;

	prandom_bytes(&v, sizeof(v));
	return v;
}
static void rand_insert(struct bch_fs *c, u64 nr)
{
	struct bkey_i_cookie k;
	int ret;
	u64 i;

	for (i = 0; i < nr; i++) {
		bkey_cookie_init(&k.k_i);
		k.k.p.offset = test_rand();

		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
					NULL, NULL, 0);
		BUG_ON(ret);
	}
}
static void rand_lookup(struct bch_fs *c, u64 nr)
{
	u64 i;

	for (i = 0; i < nr; i++) {
		struct btree_iter iter;
		struct bkey_s_c k;

		bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
				     POS(0, test_rand()), 0);

		k = bch2_btree_iter_peek(&iter);
		bch2_btree_iter_unlock(&iter);
	}
}
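/*
 * rand_mixed: random lookups, with every fourth iteration also overwriting
 * the looked-up position through the same iterator.
 */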
static void rand_mixed(struct bch_fs *c, u64 nr)
{
	int ret;
	u64 i;

	for (i = 0; i < nr; i++) {
		struct btree_iter iter;
		struct bkey_s_c k;

		bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
				     POS(0, test_rand()), 0);

		k = bch2_btree_iter_peek(&iter);

		if (!(i & 3) && k.k) {
			struct bkey_i_cookie k;

			bkey_cookie_init(&k.k_i);
			k.k.p = iter.pos;

			ret = bch2_btree_insert_at(c, NULL, NULL, 0,
					BTREE_INSERT_ENTRY(&iter, &k.k_i));
			BUG_ON(ret);
		}

		bch2_btree_iter_unlock(&iter);
	}
}
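/*
 * rand_delete: delete at random positions by inserting keys initialized with
 * bkey_init(), i.e. whiteouts, rather than going through a delete primitive.
 */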
static void rand_delete(struct bch_fs *c, u64 nr)
{
	struct bkey_i k;
	int ret;
	u64 i;

	for (i = 0; i < nr; i++) {
		bkey_init(&k.k);
		k.k.p.offset = test_rand();

		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
					NULL, NULL, 0);
		BUG_ON(ret);
	}
}
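/*
 * seq_insert: walk the btree by slots with an intent iterator and insert a
 * cookie at each position until nr keys have been inserted.
 */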
static void seq_insert(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie insert;
	int ret;
	u64 i = 0;

	bkey_cookie_init(&insert.k_i);

	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
		insert.k.p = iter.pos;

		ret = bch2_btree_insert_at(c, NULL, NULL, 0,
				BTREE_INSERT_ENTRY(&iter, &insert.k_i));
		BUG_ON(ret);

		if (++i == nr)
			break;
	}
	bch2_btree_iter_unlock(&iter);
}
static void seq_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
		;
	bch2_btree_iter_unlock(&iter);
}
static void seq_overwrite(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
			   BTREE_ITER_INTENT, k) {
		struct bkey_i_cookie u;

		bkey_reassemble(&u.k_i, k);

		ret = bch2_btree_insert_at(c, NULL, NULL, 0,
					   BTREE_INSERT_ENTRY(&iter, &u.k_i));
		BUG_ON(ret);
	}
	bch2_btree_iter_unlock(&iter);
}
static void seq_delete(struct bch_fs *c, u64 nr)
{
	int ret;

	ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
				      POS(0, 0), POS(0, U64_MAX),
				      NULL);
	BUG_ON(ret);
}
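/*
 * Perf test harness: nr_threads threads each run the selected test on
 * nr / nr_threads iterations.  The last thread to become ready records the
 * start time and wakes the others; the last thread to finish records the end
 * time and signals done_completion.
 */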
typedef void (*perf_test_fn)(struct bch_fs *, u64);

struct test_job {
	struct bch_fs		*c;
	u64			nr;
	unsigned		nr_threads;
	perf_test_fn		fn;

	atomic_t		ready;
	wait_queue_head_t	ready_wait;

	atomic_t		done;
	struct completion	done_completion;

	u64			start;
	u64			finish;
};
static int btree_perf_test_thread(void *data)
{
	struct test_job *j = data;

	if (atomic_dec_and_test(&j->ready)) {
		wake_up(&j->ready_wait);
		j->start = sched_clock();
	} else {
		wait_event(j->ready_wait, !atomic_read(&j->ready));
	}

	j->fn(j->c, j->nr / j->nr_threads);

	if (atomic_dec_and_test(&j->done)) {
		j->finish = sched_clock();
		complete(&j->done_completion);
	}

	return 0;
}
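/*
 * bch2_btree_perf_test() maps a test name onto one of the functions above,
 * runs it on nr_threads threads, and prints the elapsed time and per-second
 * rate; in-tree it is normally driven from the filesystem's sysfs perf_test
 * attribute (assumed here; the exact interface lives outside this file).
 */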
void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
			  u64 nr, unsigned nr_threads)
{
	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
	char name_buf[20], nr_buf[20], per_sec_buf[20];
	unsigned i;
	u64 time;

	atomic_set(&j.ready, nr_threads);
	init_waitqueue_head(&j.ready_wait);

	atomic_set(&j.done, nr_threads);
	init_completion(&j.done_completion);

#define perf_test(_test)						\
	if (!strcmp(testname, #_test)) j.fn = _test

	perf_test(rand_insert);
	perf_test(rand_lookup);
	perf_test(rand_mixed);
	perf_test(rand_delete);

	perf_test(seq_insert);
	perf_test(seq_lookup);
	perf_test(seq_overwrite);
	perf_test(seq_delete);

	/* a unit test, not a perf test: */
	perf_test(test_delete);
	perf_test(test_delete_written);
	perf_test(test_iterate);
	perf_test(test_iterate_extents);
	perf_test(test_iterate_slots);
	perf_test(test_iterate_slots_extents);

	perf_test(test_extent_overwrite_front);
	perf_test(test_extent_overwrite_back);
	perf_test(test_extent_overwrite_middle);
	perf_test(test_extent_overwrite_all);

	if (!j.fn) {
		pr_err("unknown test %s", testname);
		return;
	}
	//pr_info("running test %s:", testname);
	if (nr_threads == 1)
		btree_perf_test_thread(&j);
	else
		for (i = 0; i < nr_threads; i++)
			kthread_run(btree_perf_test_thread, &j,
				    "bcachefs perf test[%u]", i);
	while (wait_for_completion_interruptible(&j.done_completion))
		;
	time = j.finish - j.start;
	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
	bch2_hprint(nr_buf, nr);
	bch2_hprint(per_sec_buf, nr * NSEC_PER_SEC / time);
	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
		name_buf, nr_buf, nr_threads,
		time / NSEC_PER_SEC,
		time * nr_threads / nr,
		per_sec_buf);
}
#endif /* CONFIG_BCACHEFS_TESTS */