#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

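/*
 * Shared helper: the tests below use inode 0 of the extents and dirents
 * btrees as scratch space, so clear both ranges before (and in some cases
 * after) each test.
 */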
static void delete_test_keys(struct bch_fs *c)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);

        ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);
}

/* unit tests */

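/*
 * test_delete: insert a single cookie key at POS(0, 0), then call
 * bch2_btree_delete_at() twice through the same iterator; the second
 * delete runs against a position that has already been emptied.
 */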
static void test_delete(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);

        bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
                             BTREE_ITER_INTENT);

        ret = bch2_btree_iter_traverse(&iter);
        BUG_ON(ret);

        ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                   BTREE_INSERT_ENTRY(&iter, &k.k_i));
        BUG_ON(ret);

        pr_info("deleting once");
        ret = bch2_btree_delete_at(&iter, 0);
        BUG_ON(ret);

        pr_info("deleting twice");
        ret = bch2_btree_delete_at(&iter, 0);
        BUG_ON(ret);

        bch2_btree_iter_unlock(&iter);
}

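/*
 * test_delete_written: same insert-then-delete pattern, but the
 * bch2_journal_flush_all_pins() call in between flushes the journal pins
 * (writing back dirty btree nodes), so the delete is applied to a node
 * that has already been written out rather than one still dirty in memory.
 */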
static void test_delete_written(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);

        bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
                             BTREE_ITER_INTENT);

        ret = bch2_btree_iter_traverse(&iter);
        BUG_ON(ret);

        ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                   BTREE_INSERT_ENTRY(&iter, &k.k_i));
        BUG_ON(ret);

        bch2_journal_flush_all_pins(&c->journal);

        ret = bch2_btree_delete_at(&iter, 0);
        BUG_ON(ret);

        bch2_btree_iter_unlock(&iter);
}

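/*
 * test_iterate: insert nr cookie keys at offsets 0..nr-1 in the dirents
 * btree, then check that forward iteration returns them in ascending
 * order and that iterating backwards from the end returns them all again
 * in descending order.
 */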
static void test_iterate(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i;

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
                BUG_ON(k.k->p.offset != i++);
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
                BUG_ON(k.k->p.offset != --i);
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i);
}

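/*
 * test_iterate_extents: same idea for the extents btree; insert nr/8
 * back-to-back 8-sector extents covering [0, nr), then verify that forward
 * and backward iteration walks them contiguously (each key's start matches
 * the previous key's end).
 */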
static void test_iterate_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test extents");

        for (i = 0; i < nr; i += 8) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 8;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
                BUG_ON(bkey_start_offset(k.k) != i);
                i = k.k->p.offset;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
                BUG_ON(k.k->p.offset != i);
                i = bkey_start_offset(k.k);
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i);
}

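/*
 * test_iterate_slots: insert keys at even offsets only.  Plain iteration
 * should return just those keys; iterating with BTREE_ITER_SLOTS should
 * return every offset, with the empty odd slots showing up as deleted
 * (whiteout) keys.
 */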
static void test_iterate_slots(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i * 2;

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
                BUG_ON(k.k->p.offset != i);
                i += 2;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr * 2);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0),
                           BTREE_ITER_SLOTS, k) {
                BUG_ON(bkey_deleted(k.k) != (i & 1));
                BUG_ON(k.k->p.offset != i++);

                if (i == nr * 2)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

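/*
 * test_iterate_slots_extents: insert an 8-sector extent in every 16-sector
 * window, covering [i + 8, i + 16).  Slot iteration over the extents btree
 * should then alternate between an 8-sector hole (a deleted key) and the
 * 8-sector extent that follows it, as the BUG_ON()s below check.
 */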
static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 i;
        int ret;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i += 16) {
                struct bkey_i_cookie k;

                bkey_cookie_init(&k.k_i);
                k.k.p.offset = i + 16;
                k.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }

        pr_info("iterating forwards");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
                BUG_ON(bkey_start_offset(k.k) != i + 8);
                BUG_ON(k.k->size != 8);
                i += 16;
        }
        bch2_btree_iter_unlock(&iter);

        BUG_ON(i != nr);

        pr_info("iterating forwards by slots");

        i = 0;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0),
                           BTREE_ITER_SLOTS, k) {
                BUG_ON(bkey_deleted(k.k) != !(i % 16));

                BUG_ON(bkey_start_offset(k.k) != i);
                BUG_ON(k.k->size != 8);
                i = k.k->p.offset;

                if (i == nr)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

/* extent unit tests */

u64 test_version;

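/*
 * insert_test_extent() stamps each extent with a strictly increasing
 * version (test_version).  __test_extent_overwrite() inserts extent e1 and
 * then e2, so the second insert exercises the extent-overwrite paths; the
 * variants below cover overwriting the front, back, middle and the whole
 * of an existing extent.  The tests pass if none of the BUG_ON()s in the
 * insert/delete helpers fire.
 */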
static void insert_test_extent(struct bch_fs *c,
                               u64 start, u64 end)
{
        struct bkey_i_cookie k;
        int ret;

        //pr_info("inserting %llu-%llu v %llu", start, end, test_version);

        bkey_cookie_init(&k.k_i);
        k.k_i.k.p.offset = end;
        k.k_i.k.size = end - start;
        k.k_i.k.version.lo = test_version++;

        ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                NULL, NULL, 0);
        BUG_ON(ret);
}

static void __test_extent_overwrite(struct bch_fs *c,
                                    u64 e1_start, u64 e1_end,
                                    u64 e2_start, u64 e2_end)
{
        insert_test_extent(c, e1_start, e1_end);
        insert_test_extent(c, e2_start, e2_end);

        delete_test_keys(c);
}

static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 0, 32);
        __test_extent_overwrite(c, 8, 64, 0, 32);
}

static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 32, 64);
        __test_extent_overwrite(c, 0, 64, 32, 72);
}

static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 0, 64, 32, 40);
}

static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
        __test_extent_overwrite(c, 32, 64,  0,  64);
        __test_extent_overwrite(c, 32, 64,  0, 128);
        __test_extent_overwrite(c, 32, 64, 32,  64);
        __test_extent_overwrite(c, 32, 64, 32, 128);
}

/* perf tests */

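/*
 * test_rand() fills all 64 bits of the offset via prandom_bytes(); the
 * prandom_u32() variant under #if 0 only covers 32 bits of keyspace.
 */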
static u64 test_rand(void)
{
        u64 v;
#if 0
        v = prandom_u32();
#else
        prandom_bytes(&v, sizeof(v));
#endif
        return v;
}

static void rand_insert(struct bch_fs *c, u64 nr)
{
        struct bkey_i_cookie k;
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                bkey_cookie_init(&k.k_i);
                k.k.p.offset = test_rand();

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }
}

static void rand_lookup(struct bch_fs *c, u64 nr)
{
        u64 i;

        for (i = 0; i < nr; i++) {
                struct btree_iter iter;
                struct bkey_s_c k;

                bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
                                     POS(0, test_rand()), 0);

                k = bch2_btree_iter_peek(&iter);
                bch2_btree_iter_unlock(&iter);
        }
}

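/*
 * rand_mixed: a random lookup on every iteration, plus an insert at the
 * looked-up position on every fourth iteration (when a key was found),
 * giving roughly a 4:1 read/write mix.  Note that the inner struct
 * bkey_i_cookie k shadows the bkey_s_c k returned by the peek.
 */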
static void rand_mixed(struct bch_fs *c, u64 nr)
{
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                struct btree_iter iter;
                struct bkey_s_c k;

                bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
                                     POS(0, test_rand()), 0);

                k = bch2_btree_iter_peek(&iter);

                if (!(i & 3) && k.k) {
                        struct bkey_i_cookie k;

                        bkey_cookie_init(&k.k_i);
                        k.k.p = iter.pos;

                        ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                                   BTREE_INSERT_ENTRY(&iter, &k.k_i));
                        BUG_ON(ret);
                }

                bch2_btree_iter_unlock(&iter);
        }
}

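/*
 * rand_delete: bkey_init() produces a zero-size key of type
 * KEY_TYPE_DELETED, so inserting it at a random offset acts as a whiteout,
 * deleting whatever key may exist at that position.
 */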
static void rand_delete(struct bch_fs *c, u64 nr)
{
        struct bkey_i k;
        int ret;
        u64 i;

        for (i = 0; i < nr; i++) {
                bkey_init(&k.k);
                k.k.p.offset = test_rand();

                ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
                                        NULL, NULL, 0);
                BUG_ON(ret);
        }
}

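/*
 * seq_insert: walk the dirents btree by slots from POS_MIN with intent
 * locks and insert a cookie at each slot position; on an otherwise empty
 * btree this fills offsets 0..nr-1 of inode 0 in order.
 */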
static void seq_insert(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
        int ret;
        u64 i = 0;

        bkey_cookie_init(&insert.k_i);

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
                           BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
                insert.k.p = iter.pos;

                ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                BTREE_INSERT_ENTRY(&iter, &insert.k_i));
                BUG_ON(ret);

                if (++i == nr)
                        break;
        }
        bch2_btree_iter_unlock(&iter);
}

static void seq_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
                ;
        bch2_btree_iter_unlock(&iter);
}

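/*
 * seq_overwrite: for every existing key, bkey_reassemble() copies it and
 * the copy is immediately re-inserted at the same position, i.e. each key
 * is overwritten with itself.
 */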
static void seq_overwrite(struct bch_fs *c, u64 nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
                           BTREE_ITER_INTENT, k) {
                struct bkey_i_cookie u;

                bkey_reassemble(&u.k_i, k);

                ret = bch2_btree_insert_at(c, NULL, NULL, 0,
                                           BTREE_INSERT_ENTRY(&iter, &u.k_i));
                BUG_ON(ret);
        }
        bch2_btree_iter_unlock(&iter);
}

static void seq_delete(struct bch_fs *c, u64 nr)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
        BUG_ON(ret);
}

typedef void (*perf_test_fn)(struct bch_fs *, u64);

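/*
 * A test_job describes one run of a perf test: which function to run, the
 * total operation count and the thread count.  'ready' counts down as
 * threads start up; the last thread to arrive records the start timestamp
 * and wakes the rest.  'done' counts down as threads finish; the last one
 * records the finish timestamp and signals done_completion.
 */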
struct test_job {
        struct bch_fs                   *c;
        u64                             nr;
        unsigned                        nr_threads;
        perf_test_fn                    fn;

        atomic_t                        ready;
        wait_queue_head_t               ready_wait;

        atomic_t                        done;
        struct completion               done_completion;

        u64                             start;
        u64                             finish;
};

static int btree_perf_test_thread(void *data)
{
        struct test_job *j = data;

        if (atomic_dec_and_test(&j->ready)) {
                wake_up(&j->ready_wait);
                j->start = sched_clock();
        } else {
                wait_event(j->ready_wait, !atomic_read(&j->ready));
        }

        j->fn(j->c, j->nr / j->nr_threads);

        if (atomic_dec_and_test(&j->done)) {
                j->finish = sched_clock();
                complete(&j->done_completion);
        }

        return 0;
}

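/*
 * bch2_btree_perf_test() looks the test up by name, runs it on the caller
 * for nr_threads == 1 or on nr_threads kthreads otherwise (each thread
 * doing nr / nr_threads operations), then prints the total runtime,
 * nanoseconds per iteration and operations per second.
 *
 * This is normally driven from sysfs; assuming the perf_test attribute in
 * the accompanying sysfs code, an invocation looks roughly like:
 *
 *      echo "rand_insert 1M 4" > /sys/fs/bcachefs/<uuid>/perf_test
 *
 * (test name, operation count, thread count -- the exact attribute name
 * and argument order depend on the sysfs glue, so treat this as a sketch.)
 */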
void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
                          u64 nr, unsigned nr_threads)
{
        struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
        char name_buf[20], nr_buf[20], per_sec_buf[20];
        unsigned i;
        u64 time;

        atomic_set(&j.ready, nr_threads);
        init_waitqueue_head(&j.ready_wait);

        atomic_set(&j.done, nr_threads);
        init_completion(&j.done_completion);

#define perf_test(_test)                                \
        if (!strcmp(testname, #_test)) j.fn = _test

        perf_test(rand_insert);
        perf_test(rand_lookup);
        perf_test(rand_mixed);
        perf_test(rand_delete);

        perf_test(seq_insert);
        perf_test(seq_lookup);
        perf_test(seq_overwrite);
        perf_test(seq_delete);

        /* unit tests, not perf tests: */
        perf_test(test_delete);
        perf_test(test_delete_written);
        perf_test(test_iterate);
        perf_test(test_iterate_extents);
        perf_test(test_iterate_slots);
        perf_test(test_iterate_slots_extents);

        perf_test(test_extent_overwrite_front);
        perf_test(test_extent_overwrite_back);
        perf_test(test_extent_overwrite_middle);
        perf_test(test_extent_overwrite_all);

        if (!j.fn) {
                pr_err("unknown test %s", testname);
                return;
        }

        //pr_info("running test %s:", testname);

        if (nr_threads == 1)
                btree_perf_test_thread(&j);
        else
                for (i = 0; i < nr_threads; i++)
                        kthread_run(btree_perf_test_thread, &j,
                                    "bcachefs perf test[%u]", i);

        while (wait_for_completion_interruptible(&j.done_completion))
                ;

        time = j.finish - j.start;

        scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
        bch2_hprint(nr_buf, nr);
        bch2_hprint(per_sec_buf, nr * NSEC_PER_SEC / time);
        printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
                name_buf, nr_buf, nr_threads,
                time / NSEC_PER_SEC,
                time * nr_threads / nr,
                per_sec_buf);
}

#endif /* CONFIG_BCACHEFS_TESTS */