]> git.sesse.net Git - bcachefs-tools-debian/blob - include/trace/events/bcachefs.h
06cb5ff33b32a44d6ae736c9ccda94260ebf88f7
[bcachefs-tools-debian] / include / trace / events / bcachefs.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

/*
 * NOTE(review): the guard macro is _TRACE_BCACHE_H (presumably inherited
 * from bcache) while TRACE_SYSTEM is bcachefs — confirm this cannot collide
 * with the bcache trace header if both are ever included.
 */
#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>
/* Event class: logs a btree position as "inode:offset". */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(struct bpos p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u64,	inode				)
		__field(u64,	offset				)
	),

	TP_fast_assign(
		__entry->inode	= p.inode;
		__entry->offset	= p.offset;
	),

	TP_printk("%llu:%llu", __entry->inode, __entry->offset)
);
25
/* Event class: logs a bkey's position and size as "inode:offset len size". */
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u64,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
	),

	TP_fast_assign(
		__entry->inode	= k->p.inode;
		__entry->offset	= k->p.offset;
		__entry->size	= k->size;
	),

	TP_printk("%llu:%llu len %u", __entry->inode,
		  __entry->offset, __entry->size)
);
45
/* Event class: identifies a member device by its uuid and tier. */
DECLARE_EVENT_CLASS(bch_dev,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(unsigned,	tier		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->tier = ca->mi.tier;
	),

	/* %pU pretty-prints the raw 16-byte uuid array */
	TP_printk("%pU tier %u", __entry->uuid, __entry->tier)
);
62
/* Event class: identifies a filesystem by its user-visible uuid only. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
	),

	TP_printk("%pU", __entry->uuid)
);
77
/*
 * Event class: logs a bio's target device, start sector, length in
 * 512-byte sectors, and decoded op flags (rwbs string).
 */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		/* bi_size is in bytes; >> 9 converts to 512-byte sectors */
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
100
/* Event class: a page allocation of 'size' bytes failed on filesystem c. */
DECLARE_EVENT_CLASS(page_alloc_fail,
	TP_PROTO(struct bch_fs *c, u64 size),
	TP_ARGS(c, size),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		size		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->size = size;
	),

	TP_printk("%pU size %llu", __entry->uuid, __entry->size)
);
117
/* io.c: */

/* A read had to be split into multiple bios. */
DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* A read required a bounce buffer. */
DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* A failed read is being retried. */
DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read data is being promoted (cached to a faster tier). */
DEFINE_EVENT(bio, promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
139
/*
 * A write was throttled: logs fs uuid, inode, the bio's location/ops and
 * the imposed delay.  Units of 'delay' are not visible here — presumably
 * jiffies or ns; confirm at the call site.
 */
TRACE_EVENT(write_throttle,
	TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay),
	TP_ARGS(c, inode, bio, delay),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(u64,		delay			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->delay		= delay;
	),

	TP_printk("%pU inode %llu  %s %llu + %u delay %llu",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->delay)
);
167
/* Journal */

/* The journal ran out of space. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* The current journal entry is full. */
DEFINE_EVENT(bch_fs, journal_entry_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* A journal write is being submitted. */
DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
184
/* bset.c: */

/* Packing a bpos into the compressed on-disk format failed. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(struct bpos p),
	TP_ARGS(p)
);
191
/* Btree */

/*
 * Event class: identifies a btree node by fs uuid, the bucket holding it,
 * its level, btree id, and the node's key position.
 */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__array(char,		uuid,		16	)
		__field(u64,		bucket			)
		__field(u8,		level			)
		__field(u8,		id			)
		__field(u32,		inode			)
		__field(u64,		offset			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		/* bucket of the node's first pointer */
		__entry->bucket		= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		/* NOTE(review): key inode is u64 but stored as u32 here —
		 * truncation presumably acceptable for tracing; confirm. */
		__entry->inode		= b->key.k.p.inode;
		__entry->offset		= b->key.k.p.offset;
	),

	TP_printk("%pU bucket %llu(%u) id %u: %u:%llu",
		  __entry->uuid, __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset)
);
220
/* A btree node is being read in. */
DEFINE_EVENT(btree_node, btree_read,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);
225
/* A btree node write: logs the node's key type, bytes and sectors written. */
TRACE_EVENT(btree_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum bkey_type, type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type , __entry->bytes, __entry->sectors)
);
245
/* A new btree node was allocated. */
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* A btree node was freed. */
DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);
255
/*
 * Shrinker attempted to reap a cached btree node; 'ret' is the outcome
 * (0 = reaped; nonzero meaning defined at the call site — confirm).
 */
TRACE_EVENT(btree_node_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b, int ret),
	TP_ARGS(c, b, ret),

	TP_STRUCT__entry(
		__field(u64,			bucket		)
		__field(int,			ret		)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->ret = ret;
	),

	TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
);
272
/* Event class: btree node cannibalize-lock activity, keyed by fs uuid. */
DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
	),

	TP_printk("%pU", __entry->uuid)
);
287
/* Failed to take the cannibalize lock. */
DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Took the cannibalize lock. */
DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* A cached btree node was cannibalized (reused for a new allocation). */
DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* The cannibalize lock was released. */
DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
307
/*
 * Failed to get a btree node reserve of 'required' nodes; 'cl' is the
 * closure that will be woken when space becomes available.
 */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
	TP_ARGS(c, required, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(size_t,			required	)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->required = required;
		__entry->cl = cl;
	),

	TP_printk("%pU required %zu by %p", __entry->uuid,
		  __entry->required, __entry->cl)
);
327
/*
 * A key was inserted into a btree node: logs the node's bucket/level/id
 * and position (b_*) plus the inserted key's position and size.
 */
TRACE_EVENT(btree_insert_key,
	TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
	TP_ARGS(c, b, k),

	TP_STRUCT__entry(
		__field(u64,		b_bucket		)
		__field(u64,		b_offset		)
		__field(u64,		offset			)
		__field(u32,		b_inode			)
		__field(u32,		inode			)
		__field(u32,		size			)
		__field(u8,		level			)
		__field(u8,		id			)
	),

	TP_fast_assign(
		__entry->b_bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		/* inode fields truncated to u32 for the trace record */
		__entry->b_inode	= b->key.k.p.inode;
		__entry->b_offset	= b->key.k.p.offset;
		__entry->inode		= k->k.p.inode;
		__entry->offset		= k->k.p.offset;
		__entry->size		= k->k.size;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
		  __entry->b_bucket, __entry->level, __entry->id,
		  __entry->b_inode, __entry->b_offset,
		  __entry->inode, __entry->offset, __entry->size)
);
359
/*
 * Event class: a btree node split/compact, logging the node's identity
 * and the number of keys involved.
 */
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys),

	TP_STRUCT__entry(
		__field(u64,		bucket			)
		__field(u8,		level			)
		__field(u8,		id			)
		__field(u32,		inode			)
		__field(u64,		offset			)
		__field(u32,		keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level	= b->level;
		__entry->id	= b->btree_id;
		__entry->inode	= b->key.k.p.inode;
		__entry->offset	= b->key.k.p.offset;
		__entry->keys	= keys;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu keys %u",
		  __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset, __entry->keys)
);
386
/* A btree node was split in two. */
DEFINE_EVENT(btree_split, btree_node_split,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys)
);

/* A btree node was compacted (rewritten without splitting). */
DEFINE_EVENT(btree_split, btree_node_compact,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys)
);

/* A node became the new root of its btree. */
DEFINE_EVENT(btree_node, btree_set_root,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);
401
/* Garbage collection */

/* GC coalesced 'nodes' sibling btree nodes into fewer nodes. */
TRACE_EVENT(btree_gc_coalesce,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes),
	TP_ARGS(c, b, nodes),

	TP_STRUCT__entry(
		__field(u64,		bucket			)
		__field(u8,		level			)
		__field(u8,		id			)
		__field(u32,		inode			)
		__field(u64,		offset			)
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->bucket		= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		__entry->inode		= b->key.k.p.inode;
		__entry->offset		= b->key.k.p.offset;
		__entry->nodes		= nodes;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu nodes %u",
		  __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset, __entry->nodes)
);
430
431 TRACE_EVENT(btree_gc_coalesce_fail,
432         TP_PROTO(struct bch_fs *c, int reason),
433         TP_ARGS(c, reason),
434
435         TP_STRUCT__entry(
436                 __field(u8,             reason                  )
437                 __array(char,           uuid,   16              )
438         ),
439
440         TP_fast_assign(
441                 __entry->reason         = reason;
442                 memcpy(__entry->uuid, c->disk_sb->user_uuid.b, 16);
443         ),
444
445         TP_printk("%pU: %u", __entry->uuid, __entry->reason)
446 );
447
/* GC is rewriting a btree node. */
DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* GC failed to rewrite a btree node. */
DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* GC pass started. */
DEFINE_EVENT(bch_fs, gc_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* GC pass finished. */
DEFINE_EVENT(bch_fs, gc_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* GC coalesce phase started. */
DEFINE_EVENT(bch_fs, gc_coalesce_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* GC coalesce phase finished. */
DEFINE_EVENT(bch_fs, gc_coalesce_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* A device's sector counts saturated. */
DEFINE_EVENT(bch_dev, sectors_saturated,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);

/* GC observed saturated sector counts filesystem-wide. */
DEFINE_EVENT(bch_fs, gc_sectors_saturated,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* GC could not increment bucket generation numbers. */
DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
492
/* Allocator */

/*
 * Allocator refilled the free list: 'free' buckets available out of
 * 'total' on device ca.
 */
TRACE_EVENT(alloc_batch,
	TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
	TP_ARGS(ca, free, total),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(size_t,		free		)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->free = free;
		__entry->total = total;
	),

	TP_printk("%pU free %zu total %zu",
		__entry->uuid, __entry->free, __entry->total)
);
514
/* Started writing bucket priorities/gens for a device. */
DEFINE_EVENT(bch_dev, prio_write_start,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);

/* Finished writing bucket priorities/gens for a device. */
DEFINE_EVENT(bch_dev, prio_write_end,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);
524
/* A bucket was invalidated: logs device, byte offset, and sectors freed. */
TRACE_EVENT(invalidate,
	TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors),
	TP_ARGS(ca, bucket, sectors),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->disk_sb.bdev->bd_dev;
		/* NOTE(review): bucket is size_t, so on 32-bit this shift is
		 * done in 32 bits before widening to __u64 — confirm the
		 * bucket offset cannot exceed 4 GiB there. */
		__entry->offset		= bucket << ca->bucket_bits;
		__entry->sectors	= sectors;
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);
545
/* Bucket priorities were rescaled filesystem-wide. */
DEFINE_EVENT(bch_fs, rescale_prios,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
550
/* Event class: bucket allocation attempt on a device from a given reserve. */
DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__array(char,			uuid,	16)
		__field(enum alloc_reserve,	reserve	  )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->reserve = reserve;
	),

	TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
);
567
/* A bucket was allocated. */
DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve)
);

/* Bucket allocation failed. */
DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve)
);
577
/*
 * The free list for 'reserve' was empty; 'cl' is the closure waiting for
 * buckets to become available.
 */
TRACE_EVENT(freelist_empty_fail,
	TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve,
		 struct closure *cl),
	TP_ARGS(c, reserve, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(enum alloc_reserve,	reserve		)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->reserve = reserve;
		__entry->cl = cl;
	),

	TP_printk("%pU reserve %d cl %p", __entry->uuid, __entry->reserve,
		  __entry->cl)
);
598
/* Event class: open-bucket allocation, keyed by fs uuid and waiting closure. */
DECLARE_EVENT_CLASS(open_bucket_alloc,
	TP_PROTO(struct bch_fs *c, struct closure *cl),
	TP_ARGS(c, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->cl = cl;
	),

	TP_printk("%pU cl %p",
		  __entry->uuid, __entry->cl)
);
616
/* An open bucket was allocated. */
DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc,
	TP_PROTO(struct bch_fs *c, struct closure *cl),
	TP_ARGS(c, cl)
);

/* Open bucket allocation failed. */
DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, struct closure *cl),
	TP_ARGS(c, cl)
);
626
/* Moving IO */

/* Event class: data-move IO on a key, logging position and sector count. */
DECLARE_EVENT_CLASS(moving_io,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(__u32,		inode			)
		__field(__u64,		offset			)
		__field(__u32,		sectors			)
	),

	TP_fast_assign(
		__entry->inode		= k->p.inode;
		__entry->offset		= k->p.offset;
		__entry->sectors	= k->size;
	),

	TP_printk("%u:%llu sectors %u",
		  __entry->inode, __entry->offset, __entry->sectors)
);
648
/* Read phase of a data move started. */
DEFINE_EVENT(moving_io, move_read,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Read phase of a data move completed. */
DEFINE_EVENT(moving_io, move_read_done,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Write phase of a data move started. */
DEFINE_EVENT(moving_io, move_write,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* The data being moved changed underneath the move (collision). */
DEFINE_EVENT(moving_io, copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);
668
/* Copy GC */

/* Copy GC failed to allocate pages of the given size. */
DEFINE_EVENT(page_alloc_fail, moving_gc_alloc_fail,
	TP_PROTO(struct bch_fs *c, u64 size),
	TP_ARGS(c, size)
);

/* Copy GC pass started on a device. */
DEFINE_EVENT(bch_dev, moving_gc_start,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);
680
/* Copy GC pass finished: totals of sectors, keys and buckets moved. */
TRACE_EVENT(moving_gc_end,
	TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved,
		u64 buckets_moved),
	TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		sectors_moved	)
		__field(u64,		keys_moved	)
		__field(u64,		buckets_moved	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->sectors_moved = sectors_moved;
		__entry->keys_moved = keys_moved;
		__entry->buckets_moved = buckets_moved;
	),

	TP_printk("%pU sectors_moved %llu keys_moved %llu buckets_moved %llu",
		__entry->uuid, __entry->sectors_moved, __entry->keys_moved,
		__entry->buckets_moved)
);
704
/* Copy GC copied the extent at this key. */
DEFINE_EVENT(bkey, gc_copy,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);
709
/* Tiering */

/* Tiering failed to allocate pages of the given size. */
DEFINE_EVENT(page_alloc_fail, tiering_alloc_fail,
	TP_PROTO(struct bch_fs *c, u64 size),
	TP_ARGS(c, size)
);

/* Tiering pass started. */
DEFINE_EVENT(bch_fs, tiering_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
721
/* Tiering pass finished: totals of sectors and keys moved. */
TRACE_EVENT(tiering_end,
	TP_PROTO(struct bch_fs *c, u64 sectors_moved,
		u64 keys_moved),
	TP_ARGS(c, sectors_moved, keys_moved),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		sectors_moved	)
		__field(u64,		keys_moved	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->sectors_moved = sectors_moved;
		__entry->keys_moved = keys_moved;
	),

	TP_printk("%pU sectors_moved %llu keys_moved %llu",
		__entry->uuid, __entry->sectors_moved, __entry->keys_moved)
);
742
/* Tiering copied the extent at this key to another tier. */
DEFINE_EVENT(bkey, tiering_copy,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);
747
748 #endif /* _TRACE_BCACHE_H */
749
750 /* This part must be outside protection */
751 #include <trace/define_trace.h>