#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>
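
/*
 * DECLARE_EVENT_CLASS() defines a tracepoint template; DEFINE_EVENT()
 * stamps out an event sharing that template's entry layout. Each event
 * is emitted from bcachefs code via the generated trace_<name>() helper,
 * e.g. trace_read_split(bio) for DEFINE_EVENT(bio, read_split, ...).
 */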

DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(struct bpos p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u64,	inode	)
		__field(u64,	offset	)
	),

	TP_fast_assign(
		__entry->inode	= p.inode;
		__entry->offset	= p.offset;
	),

	TP_printk("%llu:%llu", __entry->inode, __entry->offset)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u64,	inode	)
		__field(u64,	offset	)
		__field(u32,	size	)
	),

	TP_fast_assign(
		__entry->inode	= k->p.inode;
		__entry->offset	= k->p.offset;
		__entry->size	= k->size;
	),

	TP_printk("%llu:%llu len %u", __entry->inode,
		  __entry->offset, __entry->size)
);

DECLARE_EVENT_CLASS(bch_dev,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(unsigned,	tier		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->tier = ca->mi.tier;
	),

	TP_printk("%pU tier %u", __entry->uuid, __entry->tier)
);

DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DECLARE_EVENT_CLASS(page_alloc_fail,
	TP_PROTO(struct bch_fs *c, u64 size),
	TP_ARGS(c, size),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
		__field(u64,	size		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->size = size;
	),

	TP_printk("%pU size %llu", __entry->uuid, __entry->size)
);
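
/* io.c: */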

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(write_throttle,
	TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay),
	TP_ARGS(c, inode, bio, delay),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(u64,		delay			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
			      bio->bi_iter.bi_size);
		__entry->delay		= delay;
	),

	TP_printk("%pU inode %llu %s %llu + %u delay %llu",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->delay)
);
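
/* Journal */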

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, journal_entry_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
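
/* bset.c: */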

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(struct bpos p),
	TP_ARGS(p)
);
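
/* Btree */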

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
		__field(u64,	bucket		)
		__field(u8,	level		)
		__field(u8,	id		)
		__field(u32,	inode		)
		__field(u64,	offset		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level	= b->level;
		__entry->id	= b->btree_id;
		__entry->inode	= b->key.k.p.inode;
		__entry->offset	= b->key.k.p.offset;
	),

	TP_printk("%pU bucket %llu(%u) id %u: %u:%llu",
		  __entry->uuid, __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset)
);

DEFINE_EVENT(btree_node, btree_read,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum bkey_type,	type	)
		__field(unsigned,	bytes	)
		__field(unsigned,	sectors	)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_node_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b, int ret),
	TP_ARGS(c, b, ret),

	TP_STRUCT__entry(
		__field(u64,	bucket	)
		__field(int,	ret	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->ret	= ret;
	),

	TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
);

DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
	TP_ARGS(c, required, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(size_t,			required	)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->required = required;
		__entry->cl = cl;
	),

	TP_printk("%pU required %zu by %p", __entry->uuid,
		  __entry->required, __entry->cl)
);

TRACE_EVENT(btree_insert_key,
	TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
	TP_ARGS(c, b, k),

	TP_STRUCT__entry(
		__field(u64,	b_bucket	)
		__field(u64,	b_offset	)
		__field(u64,	offset		)
		__field(u32,	b_inode		)
		__field(u32,	inode		)
		__field(u32,	size		)
		__field(u8,	level		)
		__field(u8,	id		)
	),

	TP_fast_assign(
		__entry->b_bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level		= b->level;
		__entry->id		= b->btree_id;
		__entry->b_inode	= b->key.k.p.inode;
		__entry->b_offset	= b->key.k.p.offset;
		__entry->inode		= k->k.p.inode;
		__entry->offset		= k->k.p.offset;
		__entry->size		= k->k.size;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
		  __entry->b_bucket, __entry->level, __entry->id,
		  __entry->b_inode, __entry->b_offset,
		  __entry->inode, __entry->offset, __entry->size)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys),

	TP_STRUCT__entry(
		__field(u64,	bucket	)
		__field(u8,	level	)
		__field(u8,	id	)
		__field(u32,	inode	)
		__field(u64,	offset	)
		__field(u32,	keys	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level	= b->level;
		__entry->id	= b->btree_id;
		__entry->inode	= b->key.k.p.inode;
		__entry->offset	= b->key.k.p.offset;
		__entry->keys	= keys;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu keys %u",
		  __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset, __entry->keys)
);

DEFINE_EVENT(btree_split, btree_node_split,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys)
);

DEFINE_EVENT(btree_split, btree_node_compact,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
	TP_ARGS(c, b, keys)
);

DEFINE_EVENT(btree_node, btree_set_root,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* Garbage collection */

TRACE_EVENT(btree_gc_coalesce,
	TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes),
	TP_ARGS(c, b, nodes),

	TP_STRUCT__entry(
		__field(u64,		bucket	)
		__field(u8,		level	)
		__field(u8,		id	)
		__field(u32,		inode	)
		__field(u64,		offset	)
		__field(unsigned,	nodes	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR_TRACE(c, &b->key, 0);
		__entry->level	= b->level;
		__entry->id	= b->btree_id;
		__entry->inode	= b->key.k.p.inode;
		__entry->offset	= b->key.k.p.offset;
		__entry->nodes	= nodes;
	),

	TP_printk("bucket %llu(%u) id %u: %u:%llu nodes %u",
		  __entry->bucket, __entry->level, __entry->id,
		  __entry->inode, __entry->offset, __entry->nodes)
);

TRACE_EVENT(btree_gc_coalesce_fail,
	TP_PROTO(struct bch_fs *c, int reason),
	TP_ARGS(c, reason),

	TP_STRUCT__entry(
		__field(int,	reason		)
		__array(char,	uuid,	16	)
	),

	TP_fast_assign(
		__entry->reason = reason;
		memcpy(__entry->uuid, c->disk_sb->user_uuid.b, 16);
	),

	TP_printk("%pU: %u", __entry->uuid, __entry->reason)
);

DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(bch_fs, gc_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_coalesce_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_coalesce_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_dev, sectors_saturated,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(bch_fs, gc_sectors_saturated,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
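
/* Allocator */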

TRACE_EVENT(alloc_batch,
	TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
	TP_ARGS(ca, free, total),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
		__field(size_t,	free		)
		__field(size_t,	total		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->free = free;
		__entry->total = total;
	),

	TP_printk("%pU free %zu total %zu",
		  __entry->uuid, __entry->free, __entry->total)
);

DEFINE_EVENT(bch_dev, prio_write_start,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);

DEFINE_EVENT(bch_dev, prio_write_end,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);

TRACE_EVENT(invalidate,
	TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors),
	TP_ARGS(ca, bucket, sectors),

	TP_STRUCT__entry(
		__field(unsigned,	sectors	)
		__field(dev_t,		dev	)
		__field(__u64,		offset	)
	),

	TP_fast_assign(
		__entry->dev		= ca->disk_sb.bdev->bd_dev;
		__entry->offset		= bucket << ca->bucket_bits;
		__entry->sectors	= sectors;
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

DEFINE_EVENT(bch_fs, rescale_prios,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(enum alloc_reserve,	reserve		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->reserve = reserve;
	),

	TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
	TP_ARGS(ca, reserve)
);

TRACE_EVENT(freelist_empty_fail,
	TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve,
		 struct closure *cl),
	TP_ARGS(c, reserve, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(enum alloc_reserve,	reserve		)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->reserve = reserve;
		__entry->cl = cl;
	),

	TP_printk("%pU reserve %d cl %p", __entry->uuid, __entry->reserve,
		  __entry->cl)
);

DECLARE_EVENT_CLASS(open_bucket_alloc,
	TP_PROTO(struct bch_fs *c, struct closure *cl),
	TP_ARGS(c, cl),

	TP_STRUCT__entry(
		__array(char,			uuid,	16	)
		__field(struct closure *,	cl		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->cl = cl;
	),

	TP_printk("%pU cl %p",
		  __entry->uuid, __entry->cl)
);

DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc,
	TP_PROTO(struct bch_fs *c, struct closure *cl),
	TP_ARGS(c, cl)
);

DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, struct closure *cl),
	TP_ARGS(c, cl)
);
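
/* Moving IO */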

DECLARE_EVENT_CLASS(moving_io,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(__u32,	inode	)
		__field(__u64,	offset	)
		__field(__u32,	sectors	)
	),

	TP_fast_assign(
		__entry->inode		= k->p.inode;
		__entry->offset		= k->p.offset;
		__entry->sectors	= k->size;
	),

	TP_printk("%u:%llu sectors %u",
		  __entry->inode, __entry->offset, __entry->sectors)
);

DEFINE_EVENT(moving_io, move_read,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(moving_io, move_read_done,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(moving_io, move_write,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(moving_io, copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);
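
/* Copygc */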

DEFINE_EVENT(page_alloc_fail, moving_gc_alloc_fail,
	TP_PROTO(struct bch_fs *c, u64 size),
	TP_ARGS(c, size)
);

DEFINE_EVENT(bch_dev, moving_gc_start,
	TP_PROTO(struct bch_dev *ca),
	TP_ARGS(ca)
);

TRACE_EVENT(moving_gc_end,
	TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved,
		 u64 buckets_moved),
	TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
		__field(u64,	sectors_moved	)
		__field(u64,	keys_moved	)
		__field(u64,	buckets_moved	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, ca->uuid.b, 16);
		__entry->sectors_moved	= sectors_moved;
		__entry->keys_moved	= keys_moved;
		__entry->buckets_moved	= buckets_moved;
	),

	TP_printk("%pU sectors_moved %llu keys_moved %llu buckets_moved %llu",
		  __entry->uuid, __entry->sectors_moved, __entry->keys_moved,
		  __entry->buckets_moved)
);

DEFINE_EVENT(bkey, gc_copy,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);
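
/* Tiering */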

DEFINE_EVENT(page_alloc_fail, tiering_alloc_fail,
	TP_PROTO(struct bch_fs *c, u64 size),
	TP_ARGS(c, size)
);

DEFINE_EVENT(bch_fs, tiering_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

TRACE_EVENT(tiering_end,
	TP_PROTO(struct bch_fs *c, u64 sectors_moved,
		 u64 keys_moved),
	TP_ARGS(c, sectors_moved, keys_moved),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
		__field(u64,	sectors_moved	)
		__field(u64,	keys_moved	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
		__entry->sectors_moved	= sectors_moved;
		__entry->keys_moved	= keys_moved;
	),

	TP_printk("%pU sectors_moved %llu keys_moved %llu",
		  __entry->uuid, __entry->sectors_moved, __entry->keys_moved)
);

DEFINE_EVENT(bkey, tiering_copy,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>