#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(bpos,
        TP_PROTO(struct bpos p),
        TP_ARGS(p),

        TP_STRUCT__entry(
                __field(u64, inode)
                __field(u64, offset)
        ),

        TP_fast_assign(
                __entry->inode  = p.inode;
                __entry->offset = p.offset;
        ),

        TP_printk("%llu:%llu", __entry->inode, __entry->offset)
);

DECLARE_EVENT_CLASS(bkey,
        TP_PROTO(const struct bkey *k),
        TP_ARGS(k),

        TP_STRUCT__entry(
                __field(u64, inode)
                __field(u64, offset)
                __field(u32, size)
        ),

        TP_fast_assign(
                __entry->inode  = k->p.inode;
                __entry->offset = k->p.offset;
                __entry->size   = k->size;
        ),

        TP_printk("%llu:%llu len %u", __entry->inode,
                  __entry->offset, __entry->size)
);

DECLARE_EVENT_CLASS(bch_dev,
        TP_PROTO(struct bch_dev *ca),
        TP_ARGS(ca),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(unsigned, tier)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, ca->uuid.b, 16);
                __entry->tier = ca->mi.tier;
        ),

        TP_printk("%pU tier %u", __entry->uuid, __entry->tier)
);

DECLARE_EVENT_CLASS(bch_fs,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
        ),

        TP_printk("%pU", __entry->uuid)
);

DECLARE_EVENT_CLASS(bio,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio),

        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(sector_t, sector)
                __field(unsigned int, nr_sector)
                __array(char, rwbs, 6)
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DECLARE_EVENT_CLASS(page_alloc_fail,
        TP_PROTO(struct bch_fs *c, u64 size),
        TP_ARGS(c, size),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(u64, size)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
                __entry->size = size;
        ),

        TP_printk("%pU size %llu", __entry->uuid, __entry->size)
);

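/* Read/write path */
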
DEFINE_EVENT(bio, read_split,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_bounce,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, promote,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

TRACE_EVENT(write_throttle,
        TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay),
        TP_ARGS(c, inode, bio, delay),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(u64, inode)
                __field(sector_t, sector)
                __field(unsigned int, nr_sector)
                __array(char, rwbs, 6)
                __field(u64, delay)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
                __entry->inode          = inode;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                __entry->delay          = delay;
        ),

        TP_printk("%pU inode %llu %s %llu + %u delay %llu",
                  __entry->uuid, __entry->inode,
                  __entry->rwbs, (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->delay)
);

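/* Journal */
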
DEFINE_EVENT(bch_fs, journal_full,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, journal_entry_full,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bio, journal_write,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

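/* Key packing */
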
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
        TP_PROTO(struct bpos p),
        TP_ARGS(p)
);

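/* Btree */
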
DECLARE_EVENT_CLASS(btree_node,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(u64, bucket)
                __field(u8, level)
                __field(u8, id)
                __field(u32, inode)
                __field(u64, offset)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
                __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
                __entry->level  = b->level;
                __entry->id     = b->btree_id;
                __entry->inode  = b->key.k.p.inode;
                __entry->offset = b->key.k.p.offset;
        ),

        TP_printk("%pU bucket %llu(%u) id %u: %u:%llu",
                  __entry->uuid, __entry->bucket, __entry->level, __entry->id,
                  __entry->inode, __entry->offset)
);

DEFINE_EVENT(btree_node, btree_read,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

TRACE_EVENT(btree_write,
        TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
        TP_ARGS(b, bytes, sectors),

        TP_STRUCT__entry(
                __field(enum bkey_type, type)
                __field(unsigned, bytes)
                __field(unsigned, sectors)
        ),

        TP_fast_assign(
                __entry->type    = btree_node_type(b);
                __entry->bytes   = bytes;
                __entry->sectors = sectors;
        ),

        TP_printk("bkey type %u bytes %u sectors %u",
                  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

TRACE_EVENT(btree_node_reap,
        TP_PROTO(struct bch_fs *c, struct btree *b, int ret),
        TP_ARGS(c, b, ret),

        TP_STRUCT__entry(
                __field(u64, bucket)
                __field(int, ret)
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
                __entry->ret    = ret;
        ),

        TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
);

DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
        ),

        TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

TRACE_EVENT(btree_reserve_get_fail,
        TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
        TP_ARGS(c, required, cl),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(size_t, required)
                __field(struct closure *, cl)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
                __entry->required = required;
                __entry->cl = cl;
        ),

        TP_printk("%pU required %zu by %p", __entry->uuid,
                  __entry->required, __entry->cl)
);

TRACE_EVENT(btree_insert_key,
        TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
        TP_ARGS(c, b, k),

        TP_STRUCT__entry(
                __field(u64, b_bucket)
                __field(u64, b_offset)
                __field(u64, offset)
                __field(u32, b_inode)
                __field(u32, inode)
                __field(u32, size)
                __field(u8, level)
                __field(u8, id)
        ),

        TP_fast_assign(
                __entry->b_bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
                __entry->level    = b->level;
                __entry->id       = b->btree_id;
                __entry->b_inode  = b->key.k.p.inode;
                __entry->b_offset = b->key.k.p.offset;
                __entry->inode    = k->k.p.inode;
                __entry->offset   = k->k.p.offset;
                __entry->size     = k->k.size;
        ),

        TP_printk("bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
                  __entry->b_bucket, __entry->level, __entry->id,
                  __entry->b_inode, __entry->b_offset,
                  __entry->inode, __entry->offset, __entry->size)
);

DECLARE_EVENT_CLASS(btree_split,
        TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
        TP_ARGS(c, b, keys),

        TP_STRUCT__entry(
                __field(u64, bucket)
                __field(u8, level)
                __field(u8, id)
                __field(u32, inode)
                __field(u64, offset)
                __field(u32, keys)
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
                __entry->level  = b->level;
                __entry->id     = b->btree_id;
                __entry->inode  = b->key.k.p.inode;
                __entry->offset = b->key.k.p.offset;
                __entry->keys   = keys;
        ),

        TP_printk("bucket %llu(%u) id %u: %u:%llu keys %u",
                  __entry->bucket, __entry->level, __entry->id,
                  __entry->inode, __entry->offset, __entry->keys)
);

DEFINE_EVENT(btree_split, btree_node_split,
        TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
        TP_ARGS(c, b, keys)
);

DEFINE_EVENT(btree_split, btree_node_compact,
        TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
        TP_ARGS(c, b, keys)
);

DEFINE_EVENT(btree_node, btree_set_root,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

/* Garbage collection */

TRACE_EVENT(btree_gc_coalesce,
        TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes),
        TP_ARGS(c, b, nodes),

        TP_STRUCT__entry(
                __field(u64, bucket)
                __field(u8, level)
                __field(u8, id)
                __field(u32, inode)
                __field(u64, offset)
                __field(unsigned, nodes)
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
                __entry->level  = b->level;
                __entry->id     = b->btree_id;
                __entry->inode  = b->key.k.p.inode;
                __entry->offset = b->key.k.p.offset;
                __entry->nodes  = nodes;
        ),

        TP_printk("bucket %llu(%u) id %u: %u:%llu nodes %u",
                  __entry->bucket, __entry->level, __entry->id,
                  __entry->inode, __entry->offset, __entry->nodes)
);

TRACE_EVENT(btree_gc_coalesce_fail,
        TP_PROTO(struct bch_fs *c, int reason),
        TP_ARGS(c, reason),

        TP_STRUCT__entry(
                __field(unsigned, reason)
                __array(char, uuid, 16)
        ),

        TP_fast_assign(
                __entry->reason = reason;
                memcpy(__entry->uuid, c->disk_sb->user_uuid.b, 16);
        ),

        TP_printk("%pU: %u", __entry->uuid, __entry->reason)
);

DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(bch_fs, gc_start,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_end,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_coalesce_start,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_coalesce_end,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_dev, sectors_saturated,
        TP_PROTO(struct bch_dev *ca),
        TP_ARGS(ca)
);

DEFINE_EVENT(bch_fs, gc_sectors_saturated,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

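/* Allocator */
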
TRACE_EVENT(alloc_batch,
        TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
        TP_ARGS(ca, free, total),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(size_t, free)
                __field(size_t, total)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, ca->uuid.b, 16);
                __entry->free  = free;
                __entry->total = total;
        ),

        TP_printk("%pU free %zu total %zu",
                  __entry->uuid, __entry->free, __entry->total)
);

DEFINE_EVENT(bch_dev, prio_write_start,
        TP_PROTO(struct bch_dev *ca),
        TP_ARGS(ca)
);

DEFINE_EVENT(bch_dev, prio_write_end,
        TP_PROTO(struct bch_dev *ca),
        TP_ARGS(ca)
);

TRACE_EVENT(invalidate,
        TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors),
        TP_ARGS(ca, bucket, sectors),

        TP_STRUCT__entry(
                __field(unsigned, sectors)
                __field(dev_t, dev)
                __field(__u64, offset)
        ),

        TP_fast_assign(
                __entry->dev     = ca->disk_sb.bdev->bd_dev;
                __entry->offset  = bucket << ca->bucket_bits;
                __entry->sectors = sectors;
        ),

        TP_printk("invalidated %u sectors at %d,%d sector=%llu",
                  __entry->sectors, MAJOR(__entry->dev),
                  MINOR(__entry->dev), __entry->offset)
);

DEFINE_EVENT(bch_fs, rescale_prios,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DECLARE_EVENT_CLASS(bucket_alloc,
        TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
        TP_ARGS(ca, reserve),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(enum alloc_reserve, reserve)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, ca->uuid.b, 16);
                __entry->reserve = reserve;
        ),

        TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc,
        TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
        TP_ARGS(ca, reserve)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
        TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
        TP_ARGS(ca, reserve)
);

TRACE_EVENT(freelist_empty_fail,
        TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve,
                 struct closure *cl),
        TP_ARGS(c, reserve, cl),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(enum alloc_reserve, reserve)
                __field(struct closure *, cl)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
                __entry->reserve = reserve;
                __entry->cl = cl;
        ),

        TP_printk("%pU reserve %d cl %p", __entry->uuid, __entry->reserve,
                  __entry->cl)
);

DECLARE_EVENT_CLASS(open_bucket_alloc,
        TP_PROTO(struct bch_fs *c, struct closure *cl),
        TP_ARGS(c, cl),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(struct closure *, cl)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
                __entry->cl = cl;
        ),

        TP_printk("%pU cl %p",
                  __entry->uuid, __entry->cl)
);

DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc,
        TP_PROTO(struct bch_fs *c, struct closure *cl),
        TP_ARGS(c, cl)
);

DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc_fail,
        TP_PROTO(struct bch_fs *c, struct closure *cl),
        TP_ARGS(c, cl)
);

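/* Moving IO */
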
DECLARE_EVENT_CLASS(moving_io,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k),

        TP_STRUCT__entry(
                __field(__u32, inode)
                __field(__u64, offset)
                __field(__u32, sectors)
        ),

        TP_fast_assign(
                __entry->inode   = k->p.inode;
                __entry->offset  = k->p.offset;
                __entry->sectors = k->size;
        ),

        TP_printk("%u:%llu sectors %u",
                  __entry->inode, __entry->offset, __entry->sectors)
);

DEFINE_EVENT(moving_io, move_read,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(moving_io, move_read_done,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(moving_io, move_write,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(moving_io, copy_collision,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

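/* Copy GC */
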
DEFINE_EVENT(page_alloc_fail, moving_gc_alloc_fail,
        TP_PROTO(struct bch_fs *c, u64 size),
        TP_ARGS(c, size)
);

DEFINE_EVENT(bch_dev, moving_gc_start,
        TP_PROTO(struct bch_dev *ca),
        TP_ARGS(ca)
);

TRACE_EVENT(moving_gc_end,
        TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved,
                 u64 buckets_moved),
        TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(u64, sectors_moved)
                __field(u64, keys_moved)
                __field(u64, buckets_moved)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, ca->uuid.b, 16);
                __entry->sectors_moved = sectors_moved;
                __entry->keys_moved    = keys_moved;
                __entry->buckets_moved = buckets_moved;
        ),

        TP_printk("%pU sectors_moved %llu keys_moved %llu buckets_moved %llu",
                  __entry->uuid, __entry->sectors_moved, __entry->keys_moved,
                  __entry->buckets_moved)
);

DEFINE_EVENT(bkey, gc_copy,
        TP_PROTO(const struct bkey *k),
        TP_ARGS(k)
);

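/* Tiering */
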
DEFINE_EVENT(page_alloc_fail, tiering_alloc_fail,
        TP_PROTO(struct bch_fs *c, u64 size),
        TP_ARGS(c, size)
);

DEFINE_EVENT(bch_fs, tiering_start,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

TRACE_EVENT(tiering_end,
        TP_PROTO(struct bch_fs *c, u64 sectors_moved,
                 u64 keys_moved),
        TP_ARGS(c, sectors_moved, keys_moved),

        TP_STRUCT__entry(
                __array(char, uuid, 16)
                __field(u64, sectors_moved)
                __field(u64, keys_moved)
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
                __entry->sectors_moved = sectors_moved;
                __entry->keys_moved    = keys_moved;
        ),

        TP_printk("%pU sectors_moved %llu keys_moved %llu",
                  __entry->uuid, __entry->sectors_moved, __entry->keys_moved)
);

DEFINE_EVENT(bkey, tiering_copy,
        TP_PROTO(const struct bkey *k),
        TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>