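/*
 * Tracepoint definitions for the bcache/bcachefs caching layer.
 *
 * Everything here is built on the standard kernel tracepoint macros, so
 * the events can be consumed through the usual ftrace interface, e.g.
 * (standard tracefs layout; on older kernels the mount point is
 * /sys/kernel/debug/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/bcache/bcache_read/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */
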
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

struct bcache_device;
struct bio;
struct bkey;
struct btree;
struct cache;
struct cache_set;
struct keylist;
struct moving_queue;

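/*
 * DECLARE_EVENT_CLASS() describes a trace record once: its layout
 * (TP_STRUCT__entry), how it is filled in (TP_fast_assign) and how it
 * is rendered (TP_printk).  Each DEFINE_EVENT() below then instantiates
 * a named tracepoint sharing that class, which is cheaper in generated
 * code than a separate one-off TRACE_EVENT() per event.
 */
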
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(sector_t,	orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
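		/*
		 * Recover the sector as the filesystem submitted it: the
		 * cached device remaps I/O past its superblock, and the
		 * 16-sector shift here matches the default backing device
		 * data offset (BDEV_DATA_START_DEFAULT upstream); this
		 * assumes the default offset is in use.
		 */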
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(struct bpos p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u64,	inode				)
		__field(u64,	offset				)
	),

	TP_fast_assign(
		__entry->inode	= p.inode;
		__entry->offset	= p.offset;
	),

	TP_printk("%llu:%llu", __entry->inode, __entry->offset)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u64,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
	),

	TP_fast_assign(
		__entry->inode	= k->p.inode;
		__entry->offset	= k->p.offset;
		__entry->size	= k->size;
	),

	TP_printk("%llu:%llu len %u", __entry->inode,
		  __entry->offset, __entry->size)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

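/*
 * Each DEFINE_EVENT(class, name, ...) generates a trace_<name>() call
 * for use at the call site, so e.g. the request events above would be
 * emitted from the request path roughly as (illustrative sketch, not a
 * call site from this tree):
 *
 *	trace_bcache_request_start(d, bio);
 *	...
 *	trace_bcache_request_end(d, bio);
 */
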
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_promote_collision,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
		__entry->cache_hit	= hit;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	TP_printk("%pU inode %llu  %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

TRACE_EVENT(bcache_write_throttle,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio, u64 delay),
	TP_ARGS(c, inode, bio, delay),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(u64,		delay			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
		__entry->delay		= delay;
	),

	TP_printk("%pU inode %llu  %s %llu + %u delay %llu",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->delay)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DECLARE_EVENT_CLASS(page_alloc_fail,
	TP_PROTO(struct cache_set *c, u64 size),
	TP_ARGS(c, size),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		size		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->size = size;
	),

	TP_printk("%pU size %llu", __entry->uuid, __entry->size)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

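/*
 * Note that the cache_set class above records only the cache set UUID;
 * besides the journal events it also backs the device state change, GC
 * and tiering start/stop events further down in this file.
 */
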
DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_journal_next_bucket,
	TP_PROTO(struct cache *ca, unsigned cur_idx, unsigned last_idx),
	TP_ARGS(ca, cur_idx, last_idx),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(unsigned,	cur_idx		)
		__field(unsigned,	last_idx	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->cur_idx	= cur_idx;
		__entry->last_idx	= last_idx;
	),

	TP_printk("%pU cur %u last %u", __entry->uuid,
		  __entry->cur_idx, __entry->last_idx)
);

TRACE_EVENT(bcache_journal_write_oldest,
	TP_PROTO(struct cache_set *c, u64 seq),
	TP_ARGS(c, seq),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		seq		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->seq		= seq;
	),

	TP_printk("%pU seq %llu", __entry->uuid, __entry->seq)
);

TRACE_EVENT(bcache_journal_write_oldest_done,
	TP_PROTO(struct cache_set *c, u64 seq, unsigned written),
	TP_ARGS(c, seq, written),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		seq		)
		__field(unsigned,	written		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->seq		= seq;
		__entry->written	= written;
	),

	TP_printk("%pU seq %llu written %u", __entry->uuid, __entry->seq,
		  __entry->written)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Device state changes */

DEFINE_EVENT(cache_set, bcache_cache_set_read_only,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_cache_set_read_only_done,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DECLARE_EVENT_CLASS(cache,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(unsigned,	tier		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->tier = ca->mi.tier;
	),

	TP_printk("%pU tier %u", __entry->uuid, __entry->tier)
);

DEFINE_EVENT(cache, bcache_cache_read_only,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(cache, bcache_cache_read_only_done,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(cache, bcache_cache_read_write,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(cache, bcache_cache_read_write_done,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

/* Searching */

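/*
 * Assumed semantics (from the name and the bpos class): fires when a
 * search position cannot be packed into a btree node's compressed key
 * format, forcing a fallback to unpacked key comparisons.
 */
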
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(struct bpos p),
	TP_ARGS(p)
);

/* Btree */

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct cache_set *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__array(char,		uuid,		16	)
		__field(u64,		bucket			)
		__field(u8,		level			)
		__field(u8,		id			)
		__field(u32,		inode			)
		__field(u64,		offset			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->bucket		= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		__entry->inode		= b->key.k.p.inode;
		__entry->offset		= b->key.k.p.offset;
	),

	TP_printk("%pU bucket %llu(%u) id %u: %u:%llu",
		  __entry->uuid, __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct cache_set *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum bkey_type,	type			)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct cache_set *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c, enum btree_id id),
	TP_ARGS(c, id),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(enum btree_id,	id			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->id = id;
	),

	TP_printk("%pU id %u", __entry->uuid, __entry->id)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct cache_set *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(bcache_mca_reap,
	TP_PROTO(struct cache_set *c, struct btree *b, int ret),
	TP_ARGS(c, b, ret),

	TP_STRUCT__entry(
		__field(u64,			bucket		)
		__field(int,			ret		)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->ret = ret;
	),

	TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
);

TRACE_EVENT(bcache_mca_scan,
	TP_PROTO(struct cache_set *c, unsigned touched, unsigned freed,
		 unsigned can_free, unsigned long nr),
	TP_ARGS(c, touched, freed, can_free, nr),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(unsigned long,	touched		)
		__field(unsigned long,	freed		)
		__field(unsigned long,	can_free	)
		__field(unsigned long,	nr		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->touched	= touched;
		__entry->freed		= freed;
		__entry->can_free	= can_free;
		__entry->nr		= nr;
	),

	TP_printk("%pU touched %lu freed %lu can_free %lu nr %lu",
		  __entry->uuid, __entry->touched, __entry->freed,
		  __entry->can_free, __entry->nr)
);

DECLARE_EVENT_CLASS(mca_cannibalize_lock,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

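/*
 * Cannibalizing: when allocating memory for a new btree node fails, an
 * already cached node is reclaimed and reused instead.  The lock
 * serializes that path; _lock_fail fires when another thread holds it.
 */
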
DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_mca_cannibalize_unlock,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct cache_set *c, struct btree *b, struct bkey_i *k),
	TP_ARGS(c, b, k),

	TP_STRUCT__entry(
		__field(u64,		b_bucket		)
		__field(u64,		b_offset		)
		__field(u64,		offset			)
		__field(u32,		b_inode			)
		__field(u32,		inode			)
		__field(u32,		size			)
		__field(u8,		level			)
		__field(u8,		id			)
	),

	TP_fast_assign(
		__entry->b_bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		__entry->b_inode	= b->key.k.p.inode;
		__entry->b_offset	= b->key.k.p.offset;
		__entry->inode		= k->k.p.inode;
		__entry->offset		= k->k.p.offset;
		__entry->size		= k->k.size;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
		  __entry->b_bucket, __entry->level, __entry->id,
		  __entry->b_inode, __entry->b_offset,
		  __entry->inode, __entry->offset, __entry->size)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys),

	TP_STRUCT__entry(
		__field(u64,		bucket			)
		__field(u8,		level			)
		__field(u8,		id			)
		__field(u32,		inode			)
		__field(u64,		offset			)
		__field(u32,		keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level	= b->level;
		__entry->id	= b->btree_id;
		__entry->inode	= b->key.k.p.inode;
		__entry->offset	= b->key.k.p.offset;
		__entry->keys	= keys;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu keys %u",
		  __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct cache_set *c, struct btree *b),
	TP_ARGS(c, b)
);

/* Garbage collection */

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(struct cache_set *c, struct btree *b, unsigned nodes),
	TP_ARGS(c, b, nodes),

	TP_STRUCT__entry(
		__field(u64,		bucket			)
		__field(u8,		level			)
		__field(u8,		id			)
		__field(u32,		inode			)
		__field(u64,		offset			)
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->bucket		= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		__entry->inode		= b->key.k.p.inode;
		__entry->offset		= b->key.k.p.offset;
		__entry->nodes		= nodes;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu nodes %u",
		  __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset, __entry->nodes)
);

TRACE_EVENT(bcache_btree_gc_coalesce_fail,
	TP_PROTO(struct cache_set *c, int reason),
	TP_ARGS(c, reason),

	TP_STRUCT__entry(
		__field(u8,		reason			)
		__array(char,		uuid,	16		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		memcpy(__entry->uuid, c->disk_sb->user_uuid.b, 16);
	),

	TP_printk("%pU: %u", __entry->uuid, __entry->reason)
);

TRACE_EVENT(bcache_btree_node_alloc_replacement,
	TP_PROTO(struct cache_set *c, struct btree *old, struct btree *b),
	TP_ARGS(c, old, b),

	TP_STRUCT__entry(
		__array(char,		uuid,		16	)
		__field(u64,		bucket			)
		__field(u64,		old_bucket		)
		__field(u8,		level			)
		__field(u8,		id			)
		__field(u32,		inode			)
		__field(u64,		offset			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->old_bucket	= PTR_BUCKET_NR_TRACE(c,
							      &old->key, 0);
		__entry->bucket		= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		__entry->inode		= b->key.k.p.inode;
		__entry->offset		= b->key.k.p.offset;
	),

	TP_printk("%pU for %llu bucket %llu(%u) id %u: %u:%llu",
		  __entry->uuid, __entry->old_bucket, __entry->bucket,
		  __entry->level, __entry->id,
		  __entry->inode, __entry->offset)
);

DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node,
	TP_PROTO(struct cache_set *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node_fail,
	TP_PROTO(struct cache_set *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_coalesce_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_coalesce_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache, bcache_sectors_saturated,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(cache_set, bcache_gc_sectors_saturated,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_cannot_inc_gens,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_periodic,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

TRACE_EVENT(bcache_mark_bucket,
	TP_PROTO(struct cache *ca, const struct bkey *k,
		 const struct bch_extent_ptr *ptr,
		 int sectors, bool dirty),
	TP_ARGS(ca, k, ptr, sectors, dirty),

	TP_STRUCT__entry(
		__array(char,		uuid,		16	)
		__field(u32,		inode			)
		__field(u64,		offset			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
		__field(bool,		dirty			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->inode		= k->p.inode;
		__entry->offset		= k->p.offset;
		__entry->sectors	= sectors;
		__entry->bucket		= PTR_BUCKET_NR(ca, ptr);
		__entry->dirty		= dirty;
	),

	TP_printk("%pU %u:%llu sectors %i bucket %llu dirty %i",
		  __entry->uuid, __entry->inode, __entry->offset,
		  __entry->sectors, __entry->bucket, __entry->dirty)
);

/* Allocator */

TRACE_EVENT(bcache_alloc_batch,
	TP_PROTO(struct cache *ca, size_t free, size_t total),
	TP_ARGS(ca, free, total),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(size_t,		free		)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->free = free;
		__entry->total = total;
	),

	TP_printk("%pU free %zu total %zu",
		__entry->uuid, __entry->free, __entry->total)
);

TRACE_EVENT(bcache_btree_reserve_get_fail,
	TP_PROTO(struct cache_set *c, size_t required, struct closure *cl),
	TP_ARGS(c, required, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(size_t,			required	)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->required = required;
		__entry->cl = cl;
	),

	TP_printk("%pU required %zu by %p", __entry->uuid,
		  __entry->required, __entry->cl)
);

DEFINE_EVENT(cache, bcache_prio_write_start,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(cache, bcache_prio_write_end,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket, unsigned sectors),
	TP_ARGS(ca, bucket, sectors),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->disk_sb.bdev->bd_dev;
		__entry->offset		= bucket << ca->bucket_bits;
		__entry->sectors	= sectors;
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

DEFINE_EVENT(cache_set, bcache_rescale_prios,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DECLARE_EVENT_CLASS(cache_bucket_alloc,
	TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(enum alloc_reserve,	reserve		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->reserve = reserve;
	),

	TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
);

DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc,
	TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve)
);

DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc_fail,
	TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve)
);

DECLARE_EVENT_CLASS(cache_set_bucket_alloc,
	TP_PROTO(struct cache_set *c, enum alloc_reserve reserve,
		 struct closure *cl),
	TP_ARGS(c, reserve, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(enum alloc_reserve,	reserve		)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->reserve = reserve;
		__entry->cl = cl;
	),

	TP_printk("%pU reserve %d cl %p", __entry->uuid, __entry->reserve,
		  __entry->cl)
);

DEFINE_EVENT(cache_set_bucket_alloc, bcache_freelist_empty_fail,
	TP_PROTO(struct cache_set *c, enum alloc_reserve reserve,
		 struct closure *cl),
	TP_ARGS(c, reserve, cl)
);

DECLARE_EVENT_CLASS(open_bucket_alloc,
	TP_PROTO(struct cache_set *c, struct closure *cl),
	TP_ARGS(c, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->cl = cl;
	),

	TP_printk("%pU cl %p",
		  __entry->uuid, __entry->cl)
);

DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc,
	TP_PROTO(struct cache_set *c, struct closure *cl),
	TP_ARGS(c, cl)
);

DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc_fail,
	TP_PROTO(struct cache_set *c, struct closure *cl),
	TP_ARGS(c, cl)
);

/* Keylists */

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, u64 start_offset,
		 unsigned end_inode, u64 end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Moving IO */

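/*
 * The moving_io events trace the stages of relocating a single extent
 * (used by copy GC and tiering below): read from the old location,
 * write to the new one, and a collision when the key changed while the
 * move was in flight.
 */
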
DECLARE_EVENT_CLASS(moving_io,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(__u32,		inode			)
		__field(__u64,		offset			)
		__field(__u32,		sectors			)
	),

	TP_fast_assign(
		__entry->inode		= k->p.inode;
		__entry->offset		= k->p.offset;
		__entry->sectors	= k->size;
	),

	TP_printk("%u:%llu sectors %u",
		  __entry->inode, __entry->offset, __entry->sectors)
);

DEFINE_EVENT(moving_io, bcache_move_read,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(moving_io, bcache_move_read_done,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(moving_io, bcache_move_write,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(moving_io, bcache_move_write_done,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(moving_io, bcache_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Copy GC */

DEFINE_EVENT(page_alloc_fail, bcache_moving_gc_alloc_fail,
	TP_PROTO(struct cache_set *c, u64 size),
	TP_ARGS(c, size)
);

DEFINE_EVENT(cache, bcache_moving_gc_start,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

TRACE_EVENT(bcache_moving_gc_end,
	TP_PROTO(struct cache *ca, u64 sectors_moved, u64 keys_moved,
		u64 buckets_moved),
	TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		sectors_moved	)
		__field(u64,		keys_moved	)
		__field(u64,		buckets_moved	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->sectors_moved = sectors_moved;
		__entry->keys_moved = keys_moved;
		__entry->buckets_moved = buckets_moved;
	),

	TP_printk("%pU sectors_moved %llu keys_moved %llu buckets_moved %llu",
		__entry->uuid, __entry->sectors_moved, __entry->keys_moved,
		__entry->buckets_moved)
);

DEFINE_EVENT(cache, bcache_moving_gc_reserve_empty,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(cache, bcache_moving_gc_no_work,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

/* Tiering */

DEFINE_EVENT(cache_set, bcache_tiering_refill_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_tiering_refill_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(page_alloc_fail, bcache_tiering_alloc_fail,
	TP_PROTO(struct cache_set *c, u64 size),
	TP_ARGS(c, size)
);

DEFINE_EVENT(cache_set, bcache_tiering_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

TRACE_EVENT(bcache_tiering_end,
	TP_PROTO(struct cache_set *c, u64 sectors_moved,
		u64 keys_moved),
	TP_ARGS(c, sectors_moved, keys_moved),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		sectors_moved	)
		__field(u64,		keys_moved	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->sectors_moved = sectors_moved;
		__entry->keys_moved = keys_moved;
	),

	TP_printk("%pU sectors_moved %llu keys_moved %llu",
		__entry->uuid, __entry->sectors_moved, __entry->keys_moved)
);

DEFINE_EVENT(bkey, bcache_tiering_copy,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_writeback_error,
	TP_PROTO(struct bkey *k, bool write, int error),
	TP_ARGS(k, write, error),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	write				)
		__field(int,	error				)
	),

	TP_fast_assign(
		__entry->inode	= k->p.inode;
		__entry->offset	= k->p.offset;
		__entry->size	= k->size;
		__entry->write	= write;
		__entry->error	= error;
	),

	TP_printk("%u:%llu len %u %s error %d", __entry->inode,
		  __entry->offset, __entry->size,
		  __entry->write ? "write" : "read",
		  __entry->error)
);

DEFINE_EVENT(page_alloc_fail, bcache_writeback_alloc_fail,
	TP_PROTO(struct cache_set *c, u64 size),
	TP_ARGS(c, size)
);

#endif /* _TRACE_BCACHE_H */

/*
 * This part must be outside the include guard: <trace/define_trace.h>
 * re-includes this header with TRACE_HEADER_MULTI_READ defined, so the
 * TRACE_EVENT()/DECLARE_EVENT_CLASS() macros above expand a second time
 * into the actual tracepoint code.
 */
#include <trace/define_trace.h>