1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM bcachefs
5 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHE_H
8 #include <linux/tracepoint.h>
/*
 * Event class logging a btree position (struct bpos) as "inode:offset".
 * NOTE(review): source appears elided here -- TP_ARGS/TP_STRUCT__entry
 * lines are missing from this view.
 */
10 DECLARE_EVENT_CLASS(bpos,
11 TP_PROTO(struct bpos *p),
20 __entry->inode = p->inode;
21 __entry->offset = p->offset;
24 TP_printk("%llu:%llu", __entry->inode, __entry->offset)
/*
 * Event class logging a bkey as "inode:offset len size" (position plus
 * extent length in the key).
 */
27 DECLARE_EVENT_CLASS(bkey,
28 TP_PROTO(const struct bkey *k),
38 __entry->inode = k->p.inode;
39 __entry->offset = k->p.offset;
40 __entry->size = k->size;
43 TP_printk("%llu:%llu len %u", __entry->inode,
44 __entry->offset, __entry->size)
/*
 * Event class identifying a filesystem by its 16-byte user UUID
 * (printed with %pU). Used by events that carry no other payload.
 */
47 DECLARE_EVENT_CLASS(bch_fs,
48 TP_PROTO(struct bch_fs *c),
52 __array(char, uuid, 16 )
56 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
59 TP_printk("%pU", __entry->uuid)
/*
 * Event class logging a bio: device (0 when bi_disk is unset), start
 * sector, length in 512-byte sectors, and the blktrace rwbs flag string.
 */
62 DECLARE_EVENT_CLASS(bio,
63 TP_PROTO(struct bio *bio),
68 __field(sector_t, sector )
69 __field(unsigned int, nr_sector )
70 __array(char, rwbs, 6 )
74 __entry->dev = bio->bi_disk ? bio_dev(bio) : 0;
75 __entry->sector = bio->bi_iter.bi_sector;
76 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
77 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
80 TP_printk("%d,%d %s %llu + %u",
81 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
82 (unsigned long long)__entry->sector, __entry->nr_sector)
/* Read-path events (bio class): split, bounce-buffer, retry, promote. */
87 DEFINE_EVENT(bio, read_split,
88 TP_PROTO(struct bio *bio),
92 DEFINE_EVENT(bio, read_bounce,
93 TP_PROTO(struct bio *bio),
97 DEFINE_EVENT(bio, read_retry,
98 TP_PROTO(struct bio *bio),
102 DEFINE_EVENT(bio, promote,
103 TP_PROTO(struct bio *bio),
/* Journal events: full journal / full entry (bch_fs class), write (bio). */
109 DEFINE_EVENT(bch_fs, journal_full,
110 TP_PROTO(struct bch_fs *c),
114 DEFINE_EVENT(bch_fs, journal_entry_full,
115 TP_PROTO(struct bch_fs *c),
119 DEFINE_EVENT(bio, journal_write,
120 TP_PROTO(struct bio *bio),
/*
 * Fired when journal reclaim begins: records the minimum number of
 * entries to flush plus prereserved-space and btree/key-cache
 * dirty-vs-total counters at that moment.
 */
124 TRACE_EVENT(journal_reclaim_start,
125 TP_PROTO(struct bch_fs *c, u64 min_nr,
126 u64 prereserved, u64 prereserved_total,
127 u64 btree_cache_dirty, u64 btree_cache_total,
128 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
129 TP_ARGS(c, min_nr, prereserved, prereserved_total,
130 btree_cache_dirty, btree_cache_total,
131 btree_key_cache_dirty, btree_key_cache_total),
134 __array(char, uuid, 16 )
135 __field(u64, min_nr )
136 __field(u64, prereserved )
137 __field(u64, prereserved_total )
138 __field(u64, btree_cache_dirty )
139 __field(u64, btree_cache_total )
140 __field(u64, btree_key_cache_dirty )
141 __field(u64, btree_key_cache_total )
145 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
146 __entry->min_nr = min_nr;
147 __entry->prereserved = prereserved;
148 __entry->prereserved_total = prereserved_total;
149 __entry->btree_cache_dirty = btree_cache_dirty;
150 __entry->btree_cache_total = btree_cache_total;
151 __entry->btree_key_cache_dirty = btree_key_cache_dirty;
152 __entry->btree_key_cache_total = btree_key_cache_total;
/* NOTE(review): printk args for uuid/min_nr (orig lines 156-157) are
 * elided from this view. */
155 TP_printk("%pU min %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
158 __entry->prereserved,
159 __entry->prereserved_total,
160 __entry->btree_cache_dirty,
161 __entry->btree_cache_total,
162 __entry->btree_key_cache_dirty,
163 __entry->btree_key_cache_total)
/* Fired when journal reclaim completes; logs how many entries were flushed. */
166 TRACE_EVENT(journal_reclaim_finish,
167 TP_PROTO(struct bch_fs *c, u64 nr_flushed),
168 TP_ARGS(c, nr_flushed),
171 __array(char, uuid, 16 )
172 __field(u64, nr_flushed )
176 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
177 __entry->nr_flushed = nr_flushed;
180 TP_printk("%pU flushed %llu", __entry->uuid, __entry->nr_flushed)
/* Fired when a bpos could not be packed into the compressed key format. */
185 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
186 TP_PROTO(struct bpos *p),
/*
 * Event class logging a btree node: fs UUID, node level, btree id, and
 * the node key's position (inode:offset).
 * NOTE(review): __field lines for level/id/inode (orig 198-200) are
 * elided from this view.
 */
192 DECLARE_EVENT_CLASS(btree_node,
193 TP_PROTO(struct bch_fs *c, struct btree *b),
197 __array(char, uuid, 16 )
201 __field(u64, offset )
205 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
206 __entry->level = b->c.level;
207 __entry->id = b->c.btree_id;
208 __entry->inode = b->key.k.p.inode;
209 __entry->offset = b->key.k.p.offset;
212 TP_printk("%pU %u id %u %llu:%llu",
213 __entry->uuid, __entry->level, __entry->id,
214 __entry->inode, __entry->offset)
/* Fired when a btree node is read from disk. */
217 DEFINE_EVENT(btree_node, btree_read,
218 TP_PROTO(struct bch_fs *c, struct btree *b),
/* Fired when a btree node is written; logs node type and write size. */
222 TRACE_EVENT(btree_write,
223 TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
224 TP_ARGS(b, bytes, sectors),
227 __field(enum btree_node_type, type)
228 __field(unsigned, bytes )
229 __field(unsigned, sectors )
233 __entry->type = btree_node_type(b);
234 __entry->bytes = bytes;
235 __entry->sectors = sectors;
238 TP_printk("bkey type %u bytes %u sectors %u",
239 __entry->type , __entry->bytes, __entry->sectors)
/* Btree node lifecycle: alloc, free, reap (shrinker eviction). */
242 DEFINE_EVENT(btree_node, btree_node_alloc,
243 TP_PROTO(struct bch_fs *c, struct btree *b),
247 DEFINE_EVENT(btree_node, btree_node_free,
248 TP_PROTO(struct bch_fs *c, struct btree *b),
252 DEFINE_EVENT(btree_node, btree_node_reap,
253 TP_PROTO(struct bch_fs *c, struct btree *b),
/* Event class for cannibalize-lock events: logs only the fs UUID. */
257 DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
258 TP_PROTO(struct bch_fs *c),
262 __array(char, uuid, 16 )
266 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
269 TP_printk("%pU", __entry->uuid)
272 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
273 TP_PROTO(struct bch_fs *c),
277 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
278 TP_PROTO(struct bch_fs *c),
282 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
283 TP_PROTO(struct bch_fs *c),
287 DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
288 TP_PROTO(struct bch_fs *c),
/*
 * Fired when a btree node reserve cannot be filled; logs the required
 * count and the closure that will be woken when space is available.
 * NOTE(review): the "__entry->cl = cl;" assignment (orig line ~305)
 * appears elided from this view rather than genuinely missing -- confirm
 * against the full source before treating it as a bug.
 */
292 TRACE_EVENT(btree_reserve_get_fail,
293 TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
294 TP_ARGS(c, required, cl),
297 __array(char, uuid, 16 )
298 __field(size_t, required )
299 __field(struct closure *, cl )
303 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
304 __entry->required = required;
308 TP_printk("%pU required %zu by %p", __entry->uuid,
309 __entry->required, __entry->cl)
/*
 * Fired per key inserted into a btree node: btree id plus the key's
 * position and size.
 */
312 TRACE_EVENT(btree_insert_key,
313 TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
319 __field(u64, offset )
324 __entry->id = b->c.btree_id;
325 __entry->inode = k->k.p.inode;
326 __entry->offset = k->k.p.offset;
327 __entry->size = k->k.size;
330 TP_printk("btree %u: %llu:%llu len %u", __entry->id,
331 __entry->inode, __entry->offset, __entry->size)
/* Btree topology changes: split, compact, merge, new root. */
334 DEFINE_EVENT(btree_node, btree_split,
335 TP_PROTO(struct bch_fs *c, struct btree *b),
339 DEFINE_EVENT(btree_node, btree_compact,
340 TP_PROTO(struct bch_fs *c, struct btree *b),
344 DEFINE_EVENT(btree_node, btree_merge,
345 TP_PROTO(struct bch_fs *c, struct btree *b),
349 DEFINE_EVENT(btree_node, btree_set_root,
350 TP_PROTO(struct bch_fs *c, struct btree *b),
354 /* Garbage collection */
356 DEFINE_EVENT(btree_node, btree_gc_coalesce,
357 TP_PROTO(struct bch_fs *c, struct btree *b),
/* Fired when GC coalesce aborts; logs a numeric reason code. */
361 TRACE_EVENT(btree_gc_coalesce_fail,
362 TP_PROTO(struct bch_fs *c, int reason),
367 __array(char, uuid, 16 )
371 __entry->reason = reason;
/* NOTE(review): sibling events copy the uuid from c->sb.user_uuid.b;
 * this one reads c->disk_sb.sb->user_uuid.b -- confirm whether the two
 * are always identical or this should match the siblings. */
372 memcpy(__entry->uuid, c->disk_sb.sb->user_uuid.b, 16);
375 TP_printk("%pU: %u", __entry->uuid, __entry->reason)
378 DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
379 TP_PROTO(struct bch_fs *c, struct btree *b),
383 DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
384 TP_PROTO(struct bch_fs *c, struct btree *b),
/* GC pass start/end markers (bch_fs class, UUID only). */
388 DEFINE_EVENT(bch_fs, gc_start,
389 TP_PROTO(struct bch_fs *c),
393 DEFINE_EVENT(bch_fs, gc_end,
394 TP_PROTO(struct bch_fs *c),
398 DEFINE_EVENT(bch_fs, gc_coalesce_start,
399 TP_PROTO(struct bch_fs *c),
403 DEFINE_EVENT(bch_fs, gc_coalesce_end,
404 TP_PROTO(struct bch_fs *c),
408 DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
409 TP_PROTO(struct bch_fs *c),
/*
 * Allocator batch fill: logs per-device free vs. total bucket counts.
 * UUID here is the member device's, not the filesystem's.
 */
415 TRACE_EVENT(alloc_batch,
416 TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
417 TP_ARGS(ca, free, total),
420 __array(char, uuid, 16 )
421 __field(size_t, free )
422 __field(size_t, total )
426 memcpy(__entry->uuid, ca->uuid.b, 16);
427 __entry->free = free;
428 __entry->total = total;
431 TP_printk("%pU free %zu total %zu",
432 __entry->uuid, __entry->free, __entry->total)
/*
 * Fired when a bucket is invalidated on a device: logs the device
 * number, start sector (offset), and length in sectors.
 * NOTE(review): source appears elided -- the __field(dev) declaration
 * and some TP_STRUCT__entry lines are missing from this view.
 */
435 TRACE_EVENT(invalidate,
436 TP_PROTO(struct bch_dev *ca, u64 offset, unsigned sectors),
437 TP_ARGS(ca, offset, sectors),
440 __field(unsigned, sectors )
442 __field(__u64, offset )
446 __entry->dev = ca->disk_sb.bdev->bd_dev;
/* FIX: previously ended with a comma, silently chaining into the next
 * statement via the comma operator; same behavior, but a semicolon says
 * what is meant. */
447 __entry->offset = offset;
448 __entry->sectors = sectors;
451 TP_printk("invalidated %u sectors at %d,%d sector=%llu",
452 __entry->sectors, MAJOR(__entry->dev),
453 MINOR(__entry->dev), __entry->offset)
/* Fired when bucket priorities are rescaled across the filesystem. */
456 DEFINE_EVENT(bch_fs, rescale_prios,
457 TP_PROTO(struct bch_fs *c),
/* Event class for bucket allocation: device UUID + reserve watermark. */
461 DECLARE_EVENT_CLASS(bucket_alloc,
462 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
463 TP_ARGS(ca, reserve),
466 __array(char, uuid, 16)
467 __field(enum alloc_reserve, reserve )
471 memcpy(__entry->uuid, ca->uuid.b, 16);
472 __entry->reserve = reserve;
475 TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
478 DEFINE_EVENT(bucket_alloc, bucket_alloc,
479 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
483 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
484 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
488 DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,
489 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
/* Data-move path events (bkey class): extent moved, allocation failure,
 * lost race with a concurrent update. */
495 DEFINE_EVENT(bkey, move_extent,
496 TP_PROTO(const struct bkey *k),
500 DEFINE_EVENT(bkey, move_alloc_fail,
501 TP_PROTO(const struct bkey *k),
505 DEFINE_EVENT(bkey, move_race,
506 TP_PROTO(const struct bkey *k),
/* Data-move summary: sectors and keys moved for one move operation. */
510 TRACE_EVENT(move_data,
511 TP_PROTO(struct bch_fs *c, u64 sectors_moved,
513 TP_ARGS(c, sectors_moved, keys_moved),
516 __array(char, uuid, 16 )
517 __field(u64, sectors_moved )
518 __field(u64, keys_moved )
522 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
523 __entry->sectors_moved = sectors_moved;
524 __entry->keys_moved = keys_moved;
527 TP_printk("%pU sectors_moved %llu keys_moved %llu",
528 __entry->uuid, __entry->sectors_moved, __entry->keys_moved)
/*
 * Copygc summary: sectors/buckets moved vs. left behind for one pass.
 * NOTE(review): the TRACE_EVENT(...) opening line (orig ~531) is outside
 * this view.
 */
532 TP_PROTO(struct bch_fs *c,
533 u64 sectors_moved, u64 sectors_not_moved,
534 u64 buckets_moved, u64 buckets_not_moved),
536 sectors_moved, sectors_not_moved,
537 buckets_moved, buckets_not_moved),
540 __array(char, uuid, 16 )
541 __field(u64, sectors_moved )
542 __field(u64, sectors_not_moved )
543 __field(u64, buckets_moved )
544 __field(u64, buckets_not_moved )
548 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
549 __entry->sectors_moved = sectors_moved;
550 __entry->sectors_not_moved = sectors_not_moved;
551 __entry->buckets_moved = buckets_moved;
/* BUG FIX: was "= buckets_moved" -- a copy/paste typo that made the
 * buckets_not_moved field report the moved count instead of the
 * not-moved count. */
552 __entry->buckets_not_moved = buckets_not_moved;
555 TP_printk("%pU sectors moved %llu remain %llu buckets moved %llu remain %llu",
557 __entry->sectors_moved, __entry->sectors_not_moved,
558 __entry->buckets_moved, __entry->buckets_not_moved)
/*
 * Transaction restart with both the transaction entry point and the
 * restarting call site.
 * NOTE(review): the "__entry->ip = ip;" assignment (orig ~572) appears
 * elided from this view rather than genuinely missing.
 */
561 TRACE_EVENT(transaction_restart_ip,
562 TP_PROTO(unsigned long caller, unsigned long ip),
566 __field(unsigned long, caller )
567 __field(unsigned long, ip )
571 __entry->caller = caller;
575 TP_printk("%ps %pS", (void *) __entry->caller, (void *) __entry->ip)
/* Event class for restarts identified only by the restarting ip. */
578 DECLARE_EVENT_CLASS(transaction_restart,
579 TP_PROTO(unsigned long ip),
583 __field(unsigned long, ip )
590 TP_printk("%ps", (void *) __entry->ip)
593 DEFINE_EVENT(transaction_restart, trans_restart_btree_node_reused,
594 TP_PROTO(unsigned long ip),
/*
 * Restart to avoid a lock-ordering deadlock: records who restarted,
 * why, and the held vs. wanted btree/iterator pair.
 * NOTE(review): the reason parameter/__field declarations and the
 * reason printk argument (orig 601, 613, 633) are elided from this view.
 */
598 TRACE_EVENT(trans_restart_would_deadlock,
599 TP_PROTO(unsigned long trans_ip,
600 unsigned long caller_ip,
602 enum btree_id have_btree_id,
603 unsigned have_iter_type,
604 enum btree_id want_btree_id,
605 unsigned want_iter_type),
606 TP_ARGS(trans_ip, caller_ip, reason,
607 have_btree_id, have_iter_type,
608 want_btree_id, want_iter_type),
611 __field(unsigned long, trans_ip )
612 __field(unsigned long, caller_ip )
614 __field(u8, have_btree_id )
615 __field(u8, have_iter_type )
616 __field(u8, want_btree_id )
617 __field(u8, want_iter_type )
621 __entry->trans_ip = trans_ip;
622 __entry->caller_ip = caller_ip;
623 __entry->reason = reason;
624 __entry->have_btree_id = have_btree_id;
625 __entry->have_iter_type = have_iter_type;
626 __entry->want_btree_id = want_btree_id;
627 __entry->want_iter_type = want_iter_type;
630 TP_printk("%ps %pS because %u have %u:%u want %u:%u",
631 (void *) __entry->trans_ip,
632 (void *) __entry->caller_ip,
634 __entry->have_btree_id,
635 __entry->have_iter_type,
636 __entry->want_btree_id,
637 __entry->want_iter_type)
/*
 * Restart because the iterator array was reallocated; logs the new count.
 * NOTE(review): the TP_fast_assign assignments (orig ~649-650) are
 * elided from this view.
 */
640 TRACE_EVENT(trans_restart_iters_realloced,
641 TP_PROTO(unsigned long ip, unsigned nr),
645 __field(unsigned long, ip )
646 __field(unsigned, nr )
654 TP_printk("%ps nr %u", (void *) __entry->ip, __entry->nr)
/* Restart because the transaction's preallocated memory grew. */
657 TRACE_EVENT(trans_restart_mem_realloced,
658 TP_PROTO(unsigned long ip, unsigned long bytes),
662 __field(unsigned long, ip )
663 __field(unsigned long, bytes )
668 __entry->bytes = bytes;
671 TP_printk("%ps bytes %lu", (void *) __entry->ip, __entry->bytes)
/* Named restart reasons sharing the transaction_restart class (ip only). */
674 DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get,
675 TP_PROTO(unsigned long ip),
679 DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get,
680 TP_PROTO(unsigned long ip),
684 DEFINE_EVENT(transaction_restart, trans_restart_journal_reclaim,
685 TP_PROTO(unsigned long ip),
689 DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
690 TP_PROTO(unsigned long ip),
694 DEFINE_EVENT(transaction_restart, trans_restart_fault_inject,
695 TP_PROTO(unsigned long ip),
699 DEFINE_EVENT(transaction_restart, trans_restart_btree_node_split,
700 TP_PROTO(unsigned long ip),
704 DEFINE_EVENT(transaction_restart, trans_restart_mark,
705 TP_PROTO(unsigned long ip),
709 DEFINE_EVENT(transaction_restart, trans_restart_upgrade,
710 TP_PROTO(unsigned long ip),
714 DEFINE_EVENT(transaction_restart, trans_restart_iter_upgrade,
715 TP_PROTO(unsigned long ip),
719 DEFINE_EVENT(transaction_restart, trans_restart_traverse,
720 TP_PROTO(unsigned long ip),
/*
 * Event class for node lock failures: compares the iterator's expected
 * lock sequence number against the node's current one.
 */
724 DECLARE_EVENT_CLASS(node_lock_fail,
725 TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
726 TP_ARGS(level, iter_seq, node, node_seq),
730 __field(u32, iter_seq)
732 __field(u32, node_seq)
736 __entry->level = level;
737 __entry->iter_seq = iter_seq;
738 __entry->node = node;
739 __entry->node_seq = node_seq;
742 TP_printk("level %u iter seq %u node %u node seq %u",
743 __entry->level, __entry->iter_seq,
744 __entry->node, __entry->node_seq)
/* Failure to upgrade a held lock (e.g. read -> intent). */
747 DEFINE_EVENT(node_lock_fail, node_upgrade_fail,
748 TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
749 TP_ARGS(level, iter_seq, node, node_seq)
/* Failure to retake a previously held lock (seq number changed). */
752 DEFINE_EVENT(node_lock_fail, node_relock_fail,
753 TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
754 TP_ARGS(level, iter_seq, node, node_seq)
757 #endif /* _TRACE_BCACHE_H */
759 /* This part must be outside protection */
760 #include <trace/define_trace.h>