2 #define TRACE_SYSTEM bcachefs
4 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_BCACHE_H
7 #include <linux/tracepoint.h>
/* Event class: trace a btree position, printed as "inode:offset". */
9 DECLARE_EVENT_CLASS(bpos,
10 TP_PROTO(struct bpos p),
19 __entry->inode = p.inode;
20 __entry->offset = p.offset;
23 TP_printk("%llu:%llu", __entry->inode, __entry->offset)
/* Event class: trace a bkey — position (inode:offset) plus its size. */
26 DECLARE_EVENT_CLASS(bkey,
27 TP_PROTO(const struct bkey *k),
37 __entry->inode = k->p.inode;
38 __entry->offset = k->p.offset;
39 __entry->size = k->size;
42 TP_printk("%llu:%llu len %u", __entry->inode,
43 __entry->offset, __entry->size)
/* Event class: identify a member device by its 16-byte UUID (%pU). */
46 DECLARE_EVENT_CLASS(bch_dev,
47 TP_PROTO(struct bch_dev *ca),
51 __array(char, uuid, 16 )
55 memcpy(__entry->uuid, ca->uuid.b, 16);
58 TP_printk("%pU", __entry->uuid)
/* Event class: identify a filesystem by its user-visible UUID (%pU). */
61 DECLARE_EVENT_CLASS(bch_fs,
62 TP_PROTO(struct bch_fs *c),
66 __array(char, uuid, 16 )
70 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
73 TP_printk("%pU", __entry->uuid)
/*
 * Event class: trace a bio — device (major,minor), rwbs flag string,
 * start sector, and length in 512-byte sectors.
 * dev is recorded as 0 when the bio has no disk attached.
 */
76 DECLARE_EVENT_CLASS(bio,
77 TP_PROTO(struct bio *bio),
82 __field(sector_t, sector )
83 __field(unsigned int, nr_sector )
84 __array(char, rwbs, 6 )
88 __entry->dev = bio->bi_disk ? bio_dev(bio) : 0;
89 __entry->sector = bio->bi_iter.bi_sector;
90 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
91 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
94 TP_printk("%d,%d %s %llu + %u",
95 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
96 (unsigned long long)__entry->sector, __entry->nr_sector)
/* Read-path, journal, and bkey-packing events reusing the classes above. */
101 DEFINE_EVENT(bio, read_split,
102 TP_PROTO(struct bio *bio),
106 DEFINE_EVENT(bio, read_bounce,
107 TP_PROTO(struct bio *bio),
111 DEFINE_EVENT(bio, read_retry,
112 TP_PROTO(struct bio *bio),
116 DEFINE_EVENT(bio, promote,
117 TP_PROTO(struct bio *bio),
123 DEFINE_EVENT(bch_fs, journal_full,
124 TP_PROTO(struct bch_fs *c),
128 DEFINE_EVENT(bch_fs, journal_entry_full,
129 TP_PROTO(struct bch_fs *c),
133 DEFINE_EVENT(bio, journal_write,
134 TP_PROTO(struct bio *bio),
140 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
141 TP_PROTO(struct bpos p),
/*
 * Event class: trace a btree node — fs UUID, node level, btree id,
 * and the node's key position (inode:offset).
 */
147 DECLARE_EVENT_CLASS(btree_node,
148 TP_PROTO(struct bch_fs *c, struct btree *b),
152 __array(char, uuid, 16 )
156 __field(u64, offset )
160 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
161 __entry->level = b->level;
162 __entry->id = b->btree_id;
163 __entry->inode = b->key.k.p.inode;
164 __entry->offset = b->key.k.p.offset;
167 TP_printk("%pU %u id %u %llu:%llu",
168 __entry->uuid, __entry->level, __entry->id,
169 __entry->inode, __entry->offset)
/* Fired when a btree node is read from disk. */
172 DEFINE_EVENT(btree_node, btree_read,
173 TP_PROTO(struct bch_fs *c, struct btree *b),
/*
 * Fired on a btree node write: records the node's bkey type and the
 * size of the write in bytes and sectors.
 */
177 TRACE_EVENT(btree_write,
178 TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
179 TP_ARGS(b, bytes, sectors),
182 __field(enum bkey_type, type)
183 __field(unsigned, bytes )
184 __field(unsigned, sectors )
188 __entry->type = btree_node_type(b);
189 __entry->bytes = bytes;
190 __entry->sectors = sectors;
193 TP_printk("bkey type %u bytes %u sectors %u",
194 __entry->type , __entry->bytes, __entry->sectors)
/* Btree node lifecycle: allocation, free, and cache reap. */
197 DEFINE_EVENT(btree_node, btree_node_alloc,
198 TP_PROTO(struct bch_fs *c, struct btree *b),
202 DEFINE_EVENT(btree_node, btree_node_free,
203 TP_PROTO(struct bch_fs *c, struct btree *b),
207 DEFINE_EVENT(btree_node, btree_node_reap,
208 TP_PROTO(struct bch_fs *c, struct btree *b),
/* Event class: cannibalize-lock events, identified by fs UUID only. */
212 DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
213 TP_PROTO(struct bch_fs *c),
217 __array(char, uuid, 16 )
221 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
224 TP_printk("%pU", __entry->uuid)
/* Cannibalize lock acquire/fail/use/release events. */
227 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
228 TP_PROTO(struct bch_fs *c),
232 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
233 TP_PROTO(struct bch_fs *c),
237 DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
238 TP_PROTO(struct bch_fs *c),
242 DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
243 TP_PROTO(struct bch_fs *c),
/*
 * Fired when a btree node reserve cannot be filled: records how many
 * nodes were required and the closure that was blocked waiting.
 * NOTE(review): __entry->cl is printed below but no assignment to it is
 * visible in this excerpt — confirm TP_fast_assign sets __entry->cl = cl.
 */
247 TRACE_EVENT(btree_reserve_get_fail,
248 TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
249 TP_ARGS(c, required, cl),
252 __array(char, uuid, 16 )
253 __field(size_t, required )
254 __field(struct closure *, cl )
258 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
259 __entry->required = required;
263 TP_printk("%pU required %zu by %p", __entry->uuid,
264 __entry->required, __entry->cl)
/*
 * Fired on key insertion into a btree node: records the btree id and
 * the inserted key's position and size.
 */
267 TRACE_EVENT(btree_insert_key,
268 TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
274 __field(u64, offset )
279 __entry->id = b->btree_id;
280 __entry->inode = k->k.p.inode;
281 __entry->offset = k->k.p.offset;
282 __entry->size = k->k.size;
285 TP_printk("btree %u: %llu:%llu len %u", __entry->id,
286 __entry->inode, __entry->offset, __entry->size)
/* Btree topology changes: split, compact, merge, and new root. */
289 DEFINE_EVENT(btree_node, btree_split,
290 TP_PROTO(struct bch_fs *c, struct btree *b),
294 DEFINE_EVENT(btree_node, btree_compact,
295 TP_PROTO(struct bch_fs *c, struct btree *b),
299 DEFINE_EVENT(btree_node, btree_merge,
300 TP_PROTO(struct bch_fs *c, struct btree *b),
304 DEFINE_EVENT(btree_node, btree_set_root,
305 TP_PROTO(struct bch_fs *c, struct btree *b),
309 /* Garbage collection */
311 DEFINE_EVENT(btree_node, btree_gc_coalesce,
312 TP_PROTO(struct bch_fs *c, struct btree *b),
/*
 * Fired when GC coalescing of btree nodes fails, with a reason code.
 * NOTE(review): reads the UUID via c->disk_sb.sb->user_uuid, unlike the
 * other events in this file which use c->sb.user_uuid — confirm this
 * difference is intentional.
 */
316 TRACE_EVENT(btree_gc_coalesce_fail,
317 TP_PROTO(struct bch_fs *c, int reason),
322 __array(char, uuid, 16 )
326 __entry->reason = reason;
327 memcpy(__entry->uuid, c->disk_sb.sb->user_uuid.b, 16);
330 TP_printk("%pU: %u", __entry->uuid, __entry->reason)
/* Remaining GC events: node rewrites, GC start/end, coalesce phases,
 * saturation, and gen-increment failure. */
333 DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
334 TP_PROTO(struct bch_fs *c, struct btree *b),
338 DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
339 TP_PROTO(struct bch_fs *c, struct btree *b),
343 DEFINE_EVENT(bch_fs, gc_start,
344 TP_PROTO(struct bch_fs *c),
348 DEFINE_EVENT(bch_fs, gc_end,
349 TP_PROTO(struct bch_fs *c),
353 DEFINE_EVENT(bch_fs, gc_coalesce_start,
354 TP_PROTO(struct bch_fs *c),
358 DEFINE_EVENT(bch_fs, gc_coalesce_end,
359 TP_PROTO(struct bch_fs *c),
363 DEFINE_EVENT(bch_dev, sectors_saturated,
364 TP_PROTO(struct bch_dev *ca),
368 DEFINE_EVENT(bch_fs, gc_sectors_saturated,
369 TP_PROTO(struct bch_fs *c),
373 DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
374 TP_PROTO(struct bch_fs *c),
/*
 * Allocator batch fill: records how many buckets are free out of the
 * device's total.
 */
380 TRACE_EVENT(alloc_batch,
381 TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
382 TP_ARGS(ca, free, total),
385 __array(char, uuid, 16 )
386 __field(size_t, free )
387 __field(size_t, total )
391 memcpy(__entry->uuid, ca->uuid.b, 16);
392 __entry->free = free;
393 __entry->total = total;
396 TP_printk("%pU free %zu total %zu",
397 __entry->uuid, __entry->free, __entry->total)
/*
 * Bucket invalidation: records the device (major,minor of the backing
 * bdev), the start sector, and the number of sectors invalidated.
 */
400 TRACE_EVENT(invalidate,
401 TP_PROTO(struct bch_dev *ca, u64 offset, unsigned sectors),
402 TP_ARGS(ca, offset, sectors),
405 __field(unsigned, sectors )
407 __field(__u64, offset )
411 __entry->dev = ca->disk_sb.bdev->bd_dev;
/* FIX: this assignment previously ended with a comma (forming a
 * comma-operator expression with the next line). Behavior was the
 * same, but it was clearly a typo — terminate with a semicolon. */
412 __entry->offset = offset;
413 __entry->sectors = sectors;
416 TP_printk("invalidated %u sectors at %d,%d sector=%llu",
417 __entry->sectors, MAJOR(__entry->dev),
418 MINOR(__entry->dev), __entry->offset)
/* Fired when allocation priorities are rescaled. */
421 DEFINE_EVENT(bch_fs, rescale_prios,
422 TP_PROTO(struct bch_fs *c),
/* Event class: bucket allocation, keyed by device UUID and reserve. */
426 DECLARE_EVENT_CLASS(bucket_alloc,
427 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
428 TP_ARGS(ca, reserve),
431 __array(char, uuid, 16)
432 __field(enum alloc_reserve, reserve )
436 memcpy(__entry->uuid, ca->uuid.b, 16);
437 __entry->reserve = reserve;
440 TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
/* Bucket allocation outcomes and data-move (extent) events. */
443 DEFINE_EVENT(bucket_alloc, bucket_alloc,
444 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
448 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
449 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
453 DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,
454 TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
460 DEFINE_EVENT(bkey, move_extent,
461 TP_PROTO(const struct bkey *k),
465 DEFINE_EVENT(bkey, move_alloc_fail,
466 TP_PROTO(const struct bkey *k),
470 DEFINE_EVENT(bkey, move_race,
471 TP_PROTO(const struct bkey *k),
/*
 * Data-move summary: total sectors and keys moved for a filesystem.
 * NOTE(review): the TP_PROTO continuation declaring keys_moved is not
 * visible in this excerpt — confirm against the full header.
 */
475 TRACE_EVENT(move_data,
476 TP_PROTO(struct bch_fs *c, u64 sectors_moved,
478 TP_ARGS(c, sectors_moved, keys_moved),
481 __array(char, uuid, 16 )
482 __field(u64, sectors_moved )
483 __field(u64, keys_moved )
487 memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
488 __entry->sectors_moved = sectors_moved;
489 __entry->keys_moved = keys_moved;
492 TP_printk("%pU sectors_moved %llu keys_moved %llu",
493 __entry->uuid, __entry->sectors_moved, __entry->keys_moved)
/*
 * Copy-GC completion stats: sectors and buckets moved vs. left behind
 * on a device. NOTE(review): the opening TRACE_EVENT(...) line is not
 * visible in this excerpt.
 */
497 TP_PROTO(struct bch_dev *ca,
498 u64 sectors_moved, u64 sectors_not_moved,
499 u64 buckets_moved, u64 buckets_not_moved),
501 sectors_moved, sectors_not_moved,
502 buckets_moved, buckets_not_moved),
505 __array(char, uuid, 16 )
506 __field(u64, sectors_moved )
507 __field(u64, sectors_not_moved )
508 __field(u64, buckets_moved )
509 __field(u64, buckets_not_moved )
513 memcpy(__entry->uuid, ca->uuid.b, 16);
514 __entry->sectors_moved = sectors_moved;
515 __entry->sectors_not_moved = sectors_not_moved;
516 __entry->buckets_moved = buckets_moved;
/* BUG FIX: was "= buckets_moved" (copy-paste error), which made the
 * "buckets remain" figure in the trace report the moved count. */
517 __entry->buckets_not_moved = buckets_not_moved;
520 TP_printk("%pU sectors moved %llu remain %llu buckets moved %llu remain %llu",
522 __entry->sectors_moved, __entry->sectors_not_moved,
523 __entry->buckets_moved, __entry->buckets_not_moved)
526 #endif /* _TRACE_BCACHE_H */
528 /* This part must be outside protection */
529 #include <trace/define_trace.h>