/*
 * include/trace/events/bcachefs.h - tracepoint definitions for bcachefs
 */
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM bcachefs
3
4 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_BCACHE_H
6
7 #include <linux/tracepoint.h>
8
9 DECLARE_EVENT_CLASS(bpos,
10         TP_PROTO(struct bpos p),
11         TP_ARGS(p),
12
13         TP_STRUCT__entry(
14                 __field(u64,    inode                           )
15                 __field(u64,    offset                          )
16         ),
17
18         TP_fast_assign(
19                 __entry->inode  = p.inode;
20                 __entry->offset = p.offset;
21         ),
22
23         TP_printk("%llu:%llu", __entry->inode, __entry->offset)
24 );
25
26 DECLARE_EVENT_CLASS(bkey,
27         TP_PROTO(const struct bkey *k),
28         TP_ARGS(k),
29
30         TP_STRUCT__entry(
31                 __field(u64,    inode                           )
32                 __field(u64,    offset                          )
33                 __field(u32,    size                            )
34         ),
35
36         TP_fast_assign(
37                 __entry->inode  = k->p.inode;
38                 __entry->offset = k->p.offset;
39                 __entry->size   = k->size;
40         ),
41
42         TP_printk("%llu:%llu len %u", __entry->inode,
43                   __entry->offset, __entry->size)
44 );
45
46 DECLARE_EVENT_CLASS(bch_dev,
47         TP_PROTO(struct bch_dev *ca),
48         TP_ARGS(ca),
49
50         TP_STRUCT__entry(
51                 __array(char,           uuid,   16      )
52                 __field(unsigned,       tier            )
53         ),
54
55         TP_fast_assign(
56                 memcpy(__entry->uuid, ca->uuid.b, 16);
57                 __entry->tier = ca->mi.tier;
58         ),
59
60         TP_printk("%pU tier %u", __entry->uuid, __entry->tier)
61 );
62
63 DECLARE_EVENT_CLASS(bch_fs,
64         TP_PROTO(struct bch_fs *c),
65         TP_ARGS(c),
66
67         TP_STRUCT__entry(
68                 __array(char,           uuid,   16 )
69         ),
70
71         TP_fast_assign(
72                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
73         ),
74
75         TP_printk("%pU", __entry->uuid)
76 );
77
78 DECLARE_EVENT_CLASS(bio,
79         TP_PROTO(struct bio *bio),
80         TP_ARGS(bio),
81
82         TP_STRUCT__entry(
83                 __field(dev_t,          dev                     )
84                 __field(sector_t,       sector                  )
85                 __field(unsigned int,   nr_sector               )
86                 __array(char,           rwbs,   6               )
87         ),
88
89         TP_fast_assign(
90                 __entry->dev            = bio->bi_bdev->bd_dev;
91                 __entry->sector         = bio->bi_iter.bi_sector;
92                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
93                 blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
94                               bio->bi_iter.bi_size);
95         ),
96
97         TP_printk("%d,%d  %s %llu + %u",
98                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
99                   (unsigned long long)__entry->sector, __entry->nr_sector)
100 );
101
102 DECLARE_EVENT_CLASS(page_alloc_fail,
103         TP_PROTO(struct bch_fs *c, u64 size),
104         TP_ARGS(c, size),
105
106         TP_STRUCT__entry(
107                 __array(char,           uuid,   16      )
108                 __field(u64,            size            )
109         ),
110
111         TP_fast_assign(
112                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
113                 __entry->size = size;
114         ),
115
116         TP_printk("%pU size %llu", __entry->uuid, __entry->size)
117 );
118
119 /* io.c: */
120
121 DEFINE_EVENT(bio, read_split,
122         TP_PROTO(struct bio *bio),
123         TP_ARGS(bio)
124 );
125
126 DEFINE_EVENT(bio, read_bounce,
127         TP_PROTO(struct bio *bio),
128         TP_ARGS(bio)
129 );
130
131 DEFINE_EVENT(bio, read_retry,
132         TP_PROTO(struct bio *bio),
133         TP_ARGS(bio)
134 );
135
136 DEFINE_EVENT(bio, promote,
137         TP_PROTO(struct bio *bio),
138         TP_ARGS(bio)
139 );
140
141 TRACE_EVENT(write_throttle,
142         TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay),
143         TP_ARGS(c, inode, bio, delay),
144
145         TP_STRUCT__entry(
146                 __array(char,           uuid,   16              )
147                 __field(u64,            inode                   )
148                 __field(sector_t,       sector                  )
149                 __field(unsigned int,   nr_sector               )
150                 __array(char,           rwbs,   6               )
151                 __field(u64,            delay                   )
152         ),
153
154         TP_fast_assign(
155                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
156                 __entry->inode          = inode;
157                 __entry->sector         = bio->bi_iter.bi_sector;
158                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
159                 blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
160                               bio->bi_iter.bi_size);
161                 __entry->delay          = delay;
162         ),
163
164         TP_printk("%pU inode %llu  %s %llu + %u delay %llu",
165                   __entry->uuid, __entry->inode,
166                   __entry->rwbs, (unsigned long long)__entry->sector,
167                   __entry->nr_sector, __entry->delay)
168 );
169
170 /* Journal */
171
172 DEFINE_EVENT(bch_fs, journal_full,
173         TP_PROTO(struct bch_fs *c),
174         TP_ARGS(c)
175 );
176
177 DEFINE_EVENT(bch_fs, journal_entry_full,
178         TP_PROTO(struct bch_fs *c),
179         TP_ARGS(c)
180 );
181
182 DEFINE_EVENT(bio, journal_write,
183         TP_PROTO(struct bio *bio),
184         TP_ARGS(bio)
185 );
186
187 /* bset.c: */
188
189 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
190         TP_PROTO(struct bpos p),
191         TP_ARGS(p)
192 );
193
194 /* Btree */
195
196 DECLARE_EVENT_CLASS(btree_node,
197         TP_PROTO(struct bch_fs *c, struct btree *b),
198         TP_ARGS(c, b),
199
200         TP_STRUCT__entry(
201                 __array(char,           uuid,           16      )
202                 __field(u64,            bucket                  )
203                 __field(u8,             level                   )
204                 __field(u8,             id                      )
205                 __field(u32,            inode                   )
206                 __field(u64,            offset                  )
207         ),
208
209         TP_fast_assign(
210                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
211                 __entry->bucket         = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
212                 __entry->level          = b->level;
213                 __entry->id             = b->btree_id;
214                 __entry->inode          = b->key.k.p.inode;
215                 __entry->offset         = b->key.k.p.offset;
216         ),
217
218         TP_printk("%pU bucket %llu(%u) id %u: %u:%llu",
219                   __entry->uuid, __entry->bucket, __entry->level, __entry->id,
220                   __entry->inode, __entry->offset)
221 );
222
223 DEFINE_EVENT(btree_node, btree_read,
224         TP_PROTO(struct bch_fs *c, struct btree *b),
225         TP_ARGS(c, b)
226 );
227
228 TRACE_EVENT(btree_write,
229         TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
230         TP_ARGS(b, bytes, sectors),
231
232         TP_STRUCT__entry(
233                 __field(enum bkey_type, type)
234                 __field(unsigned,       bytes                   )
235                 __field(unsigned,       sectors                 )
236         ),
237
238         TP_fast_assign(
239                 __entry->type   = btree_node_type(b);
240                 __entry->bytes  = bytes;
241                 __entry->sectors = sectors;
242         ),
243
244         TP_printk("bkey type %u bytes %u sectors %u",
245                   __entry->type , __entry->bytes, __entry->sectors)
246 );
247
248 DEFINE_EVENT(btree_node, btree_node_alloc,
249         TP_PROTO(struct bch_fs *c, struct btree *b),
250         TP_ARGS(c, b)
251 );
252
253 DEFINE_EVENT(btree_node, btree_node_free,
254         TP_PROTO(struct bch_fs *c, struct btree *b),
255         TP_ARGS(c, b)
256 );
257
258 TRACE_EVENT(btree_node_reap,
259         TP_PROTO(struct bch_fs *c, struct btree *b, int ret),
260         TP_ARGS(c, b, ret),
261
262         TP_STRUCT__entry(
263                 __field(u64,                    bucket          )
264                 __field(int,                    ret             )
265         ),
266
267         TP_fast_assign(
268                 __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
269                 __entry->ret = ret;
270         ),
271
272         TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
273 );
274
275 DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
276         TP_PROTO(struct bch_fs *c),
277         TP_ARGS(c),
278
279         TP_STRUCT__entry(
280                 __array(char,                   uuid,   16      )
281         ),
282
283         TP_fast_assign(
284                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
285         ),
286
287         TP_printk("%pU", __entry->uuid)
288 );
289
290 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
291         TP_PROTO(struct bch_fs *c),
292         TP_ARGS(c)
293 );
294
295 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
296         TP_PROTO(struct bch_fs *c),
297         TP_ARGS(c)
298 );
299
300 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
301         TP_PROTO(struct bch_fs *c),
302         TP_ARGS(c)
303 );
304
305 DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
306         TP_PROTO(struct bch_fs *c),
307         TP_ARGS(c)
308 );
309
310 TRACE_EVENT(btree_reserve_get_fail,
311         TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
312         TP_ARGS(c, required, cl),
313
314         TP_STRUCT__entry(
315                 __array(char,                   uuid,   16      )
316                 __field(size_t,                 required        )
317                 __field(struct closure *,       cl              )
318         ),
319
320         TP_fast_assign(
321                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
322                 __entry->required = required;
323                 __entry->cl = cl;
324         ),
325
326         TP_printk("%pU required %zu by %p", __entry->uuid,
327                   __entry->required, __entry->cl)
328 );
329
330 TRACE_EVENT(btree_insert_key,
331         TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
332         TP_ARGS(c, b, k),
333
334         TP_STRUCT__entry(
335                 __field(u64,            b_bucket                )
336                 __field(u64,            b_offset                )
337                 __field(u64,            offset                  )
338                 __field(u32,            b_inode                 )
339                 __field(u32,            inode                   )
340                 __field(u32,            size                    )
341                 __field(u8,             level                   )
342                 __field(u8,             id                      )
343         ),
344
345         TP_fast_assign(
346                 __entry->b_bucket       = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
347                 __entry->level          = b->level;
348                 __entry->id             = b->btree_id;
349                 __entry->b_inode        = b->key.k.p.inode;
350                 __entry->b_offset       = b->key.k.p.offset;
351                 __entry->inode          = k->k.p.inode;
352                 __entry->offset         = k->k.p.offset;
353                 __entry->size           = k->k.size;
354         ),
355
356         TP_printk("bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
357                   __entry->b_bucket, __entry->level, __entry->id,
358                   __entry->b_inode, __entry->b_offset,
359                   __entry->inode, __entry->offset, __entry->size)
360 );
361
362 DECLARE_EVENT_CLASS(btree_split,
363         TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
364         TP_ARGS(c, b, keys),
365
366         TP_STRUCT__entry(
367                 __field(u64,            bucket                  )
368                 __field(u8,             level                   )
369                 __field(u8,             id                      )
370                 __field(u32,            inode                   )
371                 __field(u64,            offset                  )
372                 __field(u32,            keys                    )
373         ),
374
375         TP_fast_assign(
376                 __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
377                 __entry->level  = b->level;
378                 __entry->id     = b->btree_id;
379                 __entry->inode  = b->key.k.p.inode;
380                 __entry->offset = b->key.k.p.offset;
381                 __entry->keys   = keys;
382         ),
383
384         TP_printk("bucket %llu(%u) id %u: %u:%llu keys %u",
385                   __entry->bucket, __entry->level, __entry->id,
386                   __entry->inode, __entry->offset, __entry->keys)
387 );
388
389 DEFINE_EVENT(btree_split, btree_node_split,
390         TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
391         TP_ARGS(c, b, keys)
392 );
393
394 DEFINE_EVENT(btree_split, btree_node_compact,
395         TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
396         TP_ARGS(c, b, keys)
397 );
398
399 DEFINE_EVENT(btree_node, btree_set_root,
400         TP_PROTO(struct bch_fs *c, struct btree *b),
401         TP_ARGS(c, b)
402 );
403
404 /* Garbage collection */
405
406 TRACE_EVENT(btree_gc_coalesce,
407         TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes),
408         TP_ARGS(c, b, nodes),
409
410         TP_STRUCT__entry(
411                 __field(u64,            bucket                  )
412                 __field(u8,             level                   )
413                 __field(u8,             id                      )
414                 __field(u32,            inode                   )
415                 __field(u64,            offset                  )
416                 __field(unsigned,       nodes                   )
417         ),
418
419         TP_fast_assign(
420                 __entry->bucket         = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
421                 __entry->level          = b->level;
422                 __entry->id             = b->btree_id;
423                 __entry->inode          = b->key.k.p.inode;
424                 __entry->offset         = b->key.k.p.offset;
425                 __entry->nodes          = nodes;
426         ),
427
428         TP_printk("bucket %llu(%u) id %u: %u:%llu nodes %u",
429                   __entry->bucket, __entry->level, __entry->id,
430                   __entry->inode, __entry->offset, __entry->nodes)
431 );
432
433 TRACE_EVENT(btree_gc_coalesce_fail,
434         TP_PROTO(struct bch_fs *c, int reason),
435         TP_ARGS(c, reason),
436
437         TP_STRUCT__entry(
438                 __field(u8,             reason                  )
439                 __array(char,           uuid,   16              )
440         ),
441
442         TP_fast_assign(
443                 __entry->reason         = reason;
444                 memcpy(__entry->uuid, c->disk_sb->user_uuid.b, 16);
445         ),
446
447         TP_printk("%pU: %u", __entry->uuid, __entry->reason)
448 );
449
450 DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
451         TP_PROTO(struct bch_fs *c, struct btree *b),
452         TP_ARGS(c, b)
453 );
454
455 DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
456         TP_PROTO(struct bch_fs *c, struct btree *b),
457         TP_ARGS(c, b)
458 );
459
460 DEFINE_EVENT(bch_fs, gc_start,
461         TP_PROTO(struct bch_fs *c),
462         TP_ARGS(c)
463 );
464
465 DEFINE_EVENT(bch_fs, gc_end,
466         TP_PROTO(struct bch_fs *c),
467         TP_ARGS(c)
468 );
469
470 DEFINE_EVENT(bch_fs, gc_coalesce_start,
471         TP_PROTO(struct bch_fs *c),
472         TP_ARGS(c)
473 );
474
475 DEFINE_EVENT(bch_fs, gc_coalesce_end,
476         TP_PROTO(struct bch_fs *c),
477         TP_ARGS(c)
478 );
479
480 DEFINE_EVENT(bch_dev, sectors_saturated,
481         TP_PROTO(struct bch_dev *ca),
482         TP_ARGS(ca)
483 );
484
485 DEFINE_EVENT(bch_fs, gc_sectors_saturated,
486         TP_PROTO(struct bch_fs *c),
487         TP_ARGS(c)
488 );
489
490 DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
491         TP_PROTO(struct bch_fs *c),
492         TP_ARGS(c)
493 );
494
495 /* Allocator */
496
497 TRACE_EVENT(alloc_batch,
498         TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
499         TP_ARGS(ca, free, total),
500
501         TP_STRUCT__entry(
502                 __array(char,           uuid,   16      )
503                 __field(size_t,         free            )
504                 __field(size_t,         total           )
505         ),
506
507         TP_fast_assign(
508                 memcpy(__entry->uuid, ca->uuid.b, 16);
509                 __entry->free = free;
510                 __entry->total = total;
511         ),
512
513         TP_printk("%pU free %zu total %zu",
514                 __entry->uuid, __entry->free, __entry->total)
515 );
516
517 DEFINE_EVENT(bch_dev, prio_write_start,
518         TP_PROTO(struct bch_dev *ca),
519         TP_ARGS(ca)
520 );
521
522 DEFINE_EVENT(bch_dev, prio_write_end,
523         TP_PROTO(struct bch_dev *ca),
524         TP_ARGS(ca)
525 );
526
527 TRACE_EVENT(invalidate,
528         TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors),
529         TP_ARGS(ca, bucket, sectors),
530
531         TP_STRUCT__entry(
532                 __field(unsigned,       sectors                 )
533                 __field(dev_t,          dev                     )
534                 __field(__u64,          offset                  )
535         ),
536
537         TP_fast_assign(
538                 __entry->dev            = ca->disk_sb.bdev->bd_dev;
539                 __entry->offset         = bucket << ca->bucket_bits;
540                 __entry->sectors        = sectors;
541         ),
542
543         TP_printk("invalidated %u sectors at %d,%d sector=%llu",
544                   __entry->sectors, MAJOR(__entry->dev),
545                   MINOR(__entry->dev), __entry->offset)
546 );
547
548 DEFINE_EVENT(bch_fs, rescale_prios,
549         TP_PROTO(struct bch_fs *c),
550         TP_ARGS(c)
551 );
552
553 DECLARE_EVENT_CLASS(bucket_alloc,
554         TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
555         TP_ARGS(ca, reserve),
556
557         TP_STRUCT__entry(
558                 __array(char,                   uuid,   16)
559                 __field(enum alloc_reserve,     reserve   )
560         ),
561
562         TP_fast_assign(
563                 memcpy(__entry->uuid, ca->uuid.b, 16);
564                 __entry->reserve = reserve;
565         ),
566
567         TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
568 );
569
570 DEFINE_EVENT(bucket_alloc, bucket_alloc,
571         TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
572         TP_ARGS(ca, reserve)
573 );
574
575 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
576         TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
577         TP_ARGS(ca, reserve)
578 );
579
580 TRACE_EVENT(freelist_empty_fail,
581         TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve,
582                  struct closure *cl),
583         TP_ARGS(c, reserve, cl),
584
585         TP_STRUCT__entry(
586                 __array(char,                   uuid,   16      )
587                 __field(enum alloc_reserve,     reserve         )
588                 __field(struct closure *,       cl              )
589         ),
590
591         TP_fast_assign(
592                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
593                 __entry->reserve = reserve;
594                 __entry->cl = cl;
595         ),
596
597         TP_printk("%pU reserve %d cl %p", __entry->uuid, __entry->reserve,
598                   __entry->cl)
599 );
600
601 DECLARE_EVENT_CLASS(open_bucket_alloc,
602         TP_PROTO(struct bch_fs *c, struct closure *cl),
603         TP_ARGS(c, cl),
604
605         TP_STRUCT__entry(
606                 __array(char,                   uuid,   16      )
607                 __field(struct closure *,       cl              )
608         ),
609
610         TP_fast_assign(
611                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
612                 __entry->cl = cl;
613         ),
614
615         TP_printk("%pU cl %p",
616                   __entry->uuid, __entry->cl)
617 );
618
619 DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc,
620         TP_PROTO(struct bch_fs *c, struct closure *cl),
621         TP_ARGS(c, cl)
622 );
623
624 DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc_fail,
625         TP_PROTO(struct bch_fs *c, struct closure *cl),
626         TP_ARGS(c, cl)
627 );
628
629 /* Moving IO */
630
631 DECLARE_EVENT_CLASS(moving_io,
632         TP_PROTO(struct bkey *k),
633         TP_ARGS(k),
634
635         TP_STRUCT__entry(
636                 __field(__u32,          inode                   )
637                 __field(__u64,          offset                  )
638                 __field(__u32,          sectors                 )
639         ),
640
641         TP_fast_assign(
642                 __entry->inode          = k->p.inode;
643                 __entry->offset         = k->p.offset;
644                 __entry->sectors        = k->size;
645         ),
646
647         TP_printk("%u:%llu sectors %u",
648                   __entry->inode, __entry->offset, __entry->sectors)
649 );
650
651 DEFINE_EVENT(moving_io, move_read,
652         TP_PROTO(struct bkey *k),
653         TP_ARGS(k)
654 );
655
656 DEFINE_EVENT(moving_io, move_read_done,
657         TP_PROTO(struct bkey *k),
658         TP_ARGS(k)
659 );
660
661 DEFINE_EVENT(moving_io, move_write,
662         TP_PROTO(struct bkey *k),
663         TP_ARGS(k)
664 );
665
666 DEFINE_EVENT(moving_io, copy_collision,
667         TP_PROTO(struct bkey *k),
668         TP_ARGS(k)
669 );
670
671 /* Copy GC */
672
673 DEFINE_EVENT(page_alloc_fail, moving_gc_alloc_fail,
674         TP_PROTO(struct bch_fs *c, u64 size),
675         TP_ARGS(c, size)
676 );
677
678 DEFINE_EVENT(bch_dev, moving_gc_start,
679         TP_PROTO(struct bch_dev *ca),
680         TP_ARGS(ca)
681 );
682
683 TRACE_EVENT(moving_gc_end,
684         TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved,
685                 u64 buckets_moved),
686         TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),
687
688         TP_STRUCT__entry(
689                 __array(char,           uuid,   16      )
690                 __field(u64,            sectors_moved   )
691                 __field(u64,            keys_moved      )
692                 __field(u64,            buckets_moved   )
693         ),
694
695         TP_fast_assign(
696                 memcpy(__entry->uuid, ca->uuid.b, 16);
697                 __entry->sectors_moved = sectors_moved;
698                 __entry->keys_moved = keys_moved;
699                 __entry->buckets_moved = buckets_moved;
700         ),
701
702         TP_printk("%pU sectors_moved %llu keys_moved %llu buckets_moved %llu",
703                 __entry->uuid, __entry->sectors_moved, __entry->keys_moved,
704                 __entry->buckets_moved)
705 );
706
707 DEFINE_EVENT(bkey, gc_copy,
708         TP_PROTO(const struct bkey *k),
709         TP_ARGS(k)
710 );
711
712 /* Tiering */
713
714 DEFINE_EVENT(page_alloc_fail, tiering_alloc_fail,
715         TP_PROTO(struct bch_fs *c, u64 size),
716         TP_ARGS(c, size)
717 );
718
719 DEFINE_EVENT(bch_fs, tiering_start,
720         TP_PROTO(struct bch_fs *c),
721         TP_ARGS(c)
722 );
723
724 TRACE_EVENT(tiering_end,
725         TP_PROTO(struct bch_fs *c, u64 sectors_moved,
726                 u64 keys_moved),
727         TP_ARGS(c, sectors_moved, keys_moved),
728
729         TP_STRUCT__entry(
730                 __array(char,           uuid,   16      )
731                 __field(u64,            sectors_moved   )
732                 __field(u64,            keys_moved      )
733         ),
734
735         TP_fast_assign(
736                 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
737                 __entry->sectors_moved = sectors_moved;
738                 __entry->keys_moved = keys_moved;
739         ),
740
741         TP_printk("%pU sectors_moved %llu keys_moved %llu",
742                 __entry->uuid, __entry->sectors_moved, __entry->keys_moved)
743 );
744
745 DEFINE_EVENT(bkey, tiering_copy,
746         TP_PROTO(const struct bkey *k),
747         TP_ARGS(k)
748 );
749
750 #endif /* _TRACE_BCACHE_H */
751
752 /* This part must be outside protection */
753 #include <trace/define_trace.h>