2 #define TRACE_SYSTEM bcache
4 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_BCACHE_H
7 #include <linux/tracepoint.h>
/*
 * NOTE(review): this excerpt is non-contiguous (embedded original line
 * numbers skip), so macro argument lists in this file may be missing
 * lines (TP_ARGS, TP_STRUCT__entry openers, closing parens, ...).
 */
/*
 * Event class for I/O requests to a cached (bcache) device: records the
 * request bio's dev/sector/length and rwbs flag string, plus the
 * originating disk's major/minor and the original sector.
 */
18 DECLARE_EVENT_CLASS(bcache_request,
19 TP_PROTO(struct bcache_device *d, struct bio *bio),
24 __field(unsigned int, orig_major )
25 __field(unsigned int, orig_minor )
26 __field(sector_t, sector )
27 __field(sector_t, orig_sector )
28 __field(unsigned int, nr_sector )
29 __array(char, rwbs, 6 )
33 __entry->dev = bio->bi_bdev->bd_dev;
34 __entry->orig_major = d->disk->major;
35 __entry->orig_minor = d->disk->first_minor;
36 __entry->sector = bio->bi_iter.bi_sector;
/* The constant 16 presumably rewinds past the bcache data offset at the
 * start of the backing device — TODO confirm against BDEV_DATA_START. */
37 __entry->orig_sector = bio->bi_iter.bi_sector - 16;
/* bi_size is in bytes; >> 9 converts to 512-byte sectors. */
38 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
39 blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
40 bio->bi_iter.bi_size);
43 TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
44 MAJOR(__entry->dev), MINOR(__entry->dev),
45 __entry->rwbs, (unsigned long long)__entry->sector,
46 __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
47 (unsigned long long)__entry->orig_sector)
/* Event class keyed on a btree position: logs inode:offset of a bpos. */
50 DECLARE_EVENT_CLASS(bpos,
51 TP_PROTO(struct bpos p),
60 __entry->inode = p.inode;
61 __entry->offset = p.offset;
64 TP_printk("%llu:%llu", __entry->inode, __entry->offset)
/* Event class keyed on a bkey: logs inode:offset plus the key's size. */
67 DECLARE_EVENT_CLASS(bkey,
68 TP_PROTO(const struct bkey *k),
78 __entry->inode = k->p.inode;
79 __entry->offset = k->p.offset;
80 __entry->size = k->size;
83 TP_printk("%llu:%llu len %u", __entry->inode,
84 __entry->offset, __entry->size)
/* Fired when a request to a bcache device starts / completes. */
89 DEFINE_EVENT(bcache_request, bcache_request_start,
90 TP_PROTO(struct bcache_device *d, struct bio *bio),
94 DEFINE_EVENT(bcache_request, bcache_request_end,
95 TP_PROTO(struct bcache_device *d, struct bio *bio),
/*
 * Event class for a bare bio: device, start sector, length in 512-byte
 * sectors, and the rwbs flag string filled in by blk_fill_rwbs().
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
99 DECLARE_EVENT_CLASS(bcache_bio,
100 TP_PROTO(struct bio *bio),
105 __field(sector_t, sector )
106 __field(unsigned int, nr_sector )
107 __array(char, rwbs, 6 )
111 __entry->dev = bio->bi_bdev->bd_dev;
112 __entry->sector = bio->bi_iter.bi_sector;
/* bytes -> 512-byte sectors */
113 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
114 blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
115 bio->bi_iter.bi_size);
118 TP_printk("%d,%d %s %llu + %u",
119 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
120 (unsigned long long)__entry->sector, __entry->nr_sector)
/* Cache bypass decisions: sequential-detection or congestion based. */
123 DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
124 TP_PROTO(struct bio *bio),
128 DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
129 TP_PROTO(struct bio *bio),
/* Data promotion into the cache; collision = key raced with another write. */
133 DEFINE_EVENT(bcache_bio, bcache_promote,
134 TP_PROTO(struct bio *bio),
138 DEFINE_EVENT(bkey, bcache_promote_collision,
139 TP_PROTO(const struct bkey *k),
/*
 * Read request: bio location/size plus whether it was a cache hit and
 * whether the cache was bypassed.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
143 TRACE_EVENT(bcache_read,
144 TP_PROTO(struct bio *bio, bool hit, bool bypass),
145 TP_ARGS(bio, hit, bypass),
149 __field(sector_t, sector )
150 __field(unsigned int, nr_sector )
151 __array(char, rwbs, 6 )
152 __field(bool, cache_hit )
153 __field(bool, bypass )
157 __entry->dev = bio->bi_bdev->bd_dev;
158 __entry->sector = bio->bi_iter.bi_sector;
/* bytes -> 512-byte sectors */
159 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
160 blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
161 bio->bi_iter.bi_size);
162 __entry->cache_hit = hit;
163 __entry->bypass = bypass;
166 TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
167 MAJOR(__entry->dev), MINOR(__entry->dev),
168 __entry->rwbs, (unsigned long long)__entry->sector,
169 __entry->nr_sector, __entry->cache_hit, __entry->bypass)
/*
 * Write request: cache-set uuid, target inode, bio location/size, and
 * the writeback/bypass decision flags.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
172 TRACE_EVENT(bcache_write,
173 TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
174 bool writeback, bool bypass),
175 TP_ARGS(c, inode, bio, writeback, bypass),
178 __array(char, uuid, 16 )
180 __field(sector_t, sector )
181 __field(unsigned int, nr_sector )
182 __array(char, rwbs, 6 )
183 __field(bool, writeback )
184 __field(bool, bypass )
188 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
189 __entry->inode = inode;
190 __entry->sector = bio->bi_iter.bi_sector;
/* bytes -> 512-byte sectors */
191 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
192 blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
193 bio->bi_iter.bi_size);
194 __entry->writeback = writeback;
195 __entry->bypass = bypass;
/* NOTE(review): the "hit %u" label here prints __entry->writeback — the
 * label looks like a copy-paste from bcache_read; "writeback %u" would
 * match the field. Left unchanged (format string is ABI-visible). */
198 TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u",
199 __entry->uuid, __entry->inode,
200 __entry->rwbs, (unsigned long long)__entry->sector,
201 __entry->nr_sector, __entry->writeback, __entry->bypass)
/*
 * Write throttled by foreground-write rate limiting: records the bio
 * plus the imposed delay.
 * NOTE(review): excerpt is non-contiguous; the __entry->delay field
 * declaration is not visible here (presumably on a missing line).
 */
204 TRACE_EVENT(bcache_write_throttle,
205 TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio, u64 delay),
206 TP_ARGS(c, inode, bio, delay),
209 __array(char, uuid, 16 )
211 __field(sector_t, sector )
212 __field(unsigned int, nr_sector )
213 __array(char, rwbs, 6 )
218 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
219 __entry->inode = inode;
220 __entry->sector = bio->bi_iter.bi_sector;
/* bytes -> 512-byte sectors */
221 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
222 blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
223 bio->bi_iter.bi_size);
224 __entry->delay = delay;
227 TP_printk("%pU inode %llu %s %llu + %u delay %llu",
228 __entry->uuid, __entry->inode,
229 __entry->rwbs, (unsigned long long)__entry->sector,
230 __entry->nr_sector, __entry->delay)
/* A read is being retried (e.g. after an error on the first attempt). */
233 DEFINE_EVENT(bcache_bio, bcache_read_retry,
234 TP_PROTO(struct bio *bio),
/*
 * Event class for page-allocation failures of a given size within a
 * cache set; logs the set's uuid and the requested size.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
238 DECLARE_EVENT_CLASS(page_alloc_fail,
239 TP_PROTO(struct cache_set *c, u64 size),
243 __array(char, uuid, 16 )
248 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
249 __entry->size = size;
252 TP_printk("%pU size %llu", __entry->uuid, __entry->size)
/* Event class that only identifies a cache set by its user uuid. */
257 DECLARE_EVENT_CLASS(cache_set,
258 TP_PROTO(struct cache_set *c),
262 __array(char, uuid, 16 )
266 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
269 TP_printk("%pU", __entry->uuid)
/* A key is being replayed from the journal during recovery. */
272 DEFINE_EVENT(bkey, bcache_journal_replay_key,
273 TP_PROTO(const struct bkey *k),
/*
 * Journal advanced to the next bucket on a cache device; logs the
 * device uuid and the current/last journal bucket indices.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
277 TRACE_EVENT(bcache_journal_next_bucket,
278 TP_PROTO(struct cache *ca, unsigned cur_idx, unsigned last_idx),
279 TP_ARGS(ca, cur_idx, last_idx),
282 __array(char, uuid, 16 )
283 __field(unsigned, cur_idx )
284 __field(unsigned, last_idx )
288 memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
289 __entry->cur_idx = cur_idx;
290 __entry->last_idx = last_idx;
293 TP_printk("%pU cur %u last %u", __entry->uuid,
294 __entry->cur_idx, __entry->last_idx)
/* Flushing the btree to reclaim journal space up to sequence @seq. */
297 TRACE_EVENT(bcache_journal_write_oldest,
298 TP_PROTO(struct cache_set *c, u64 seq),
302 __array(char, uuid, 16 )
307 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
311 TP_printk("%pU seq %llu", __entry->uuid, __entry->seq)
/* Completion of the above; @written is how many entries were flushed. */
314 TRACE_EVENT(bcache_journal_write_oldest_done,
315 TP_PROTO(struct cache_set *c, u64 seq, unsigned written),
316 TP_ARGS(c, seq, written),
319 __array(char, uuid, 16 )
321 __field(unsigned, written )
325 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
327 __entry->written = written;
330 TP_printk("%pU seq %llu written %u", __entry->uuid, __entry->seq,
/* Journal (or current journal entry) has no room for more keys. */
334 DEFINE_EVENT(cache_set, bcache_journal_full,
335 TP_PROTO(struct cache_set *c),
339 DEFINE_EVENT(cache_set, bcache_journal_entry_full,
340 TP_PROTO(struct cache_set *c),
/* A journal write bio is being submitted. */
344 DEFINE_EVENT(bcache_bio, bcache_journal_write,
345 TP_PROTO(struct bio *bio),
349 /* Device state changes */
/* Cache set transitioning to (and finished transitioning to) read-only. */
351 DEFINE_EVENT(cache_set, bcache_cache_set_read_only,
352 TP_PROTO(struct cache_set *c),
356 DEFINE_EVENT(cache_set, bcache_cache_set_read_only_done,
357 TP_PROTO(struct cache_set *c),
/*
 * Event class identifying a single cache device by uuid and its tier.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
361 DECLARE_EVENT_CLASS(cache,
362 TP_PROTO(struct cache *ca),
366 __array(char, uuid, 16 )
367 __field(unsigned, tier )
371 memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
372 __entry->tier = ca->mi.tier;
375 TP_printk("%pU tier %u", __entry->uuid, __entry->tier)
/* Per-device read-only / read-write transitions and their completion. */
378 DEFINE_EVENT(cache, bcache_cache_read_only,
379 TP_PROTO(struct cache *ca),
383 DEFINE_EVENT(cache, bcache_cache_read_only_done,
384 TP_PROTO(struct cache *ca),
388 DEFINE_EVENT(cache, bcache_cache_read_write,
389 TP_PROTO(struct cache *ca),
393 DEFINE_EVENT(cache, bcache_cache_read_write_done,
394 TP_PROTO(struct cache *ca),
/* A position could not be packed into the compressed bkey format. */
400 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
401 TP_PROTO(struct bpos p),
/*
 * Event class for a btree node: cache-set uuid, the bucket holding the
 * node, its level and btree id, and the node key's inode:offset.
 * NOTE(review): excerpt is non-contiguous; the level/id/inode field
 * declarations are not visible here.
 */
407 DECLARE_EVENT_CLASS(btree_node,
408 TP_PROTO(struct cache_set *c, struct btree *b),
412 __array(char, uuid, 16 )
413 __field(u64, bucket )
417 __field(u64, offset )
421 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
/* Bucket number of the node's first pointer. */
422 __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
423 __entry->level = b->level;
424 __entry->id = b->btree_id;
425 __entry->inode = b->key.k.p.inode;
426 __entry->offset = b->key.k.p.offset;
429 TP_printk("%pU bucket %llu(%u) id %u: %u:%llu",
430 __entry->uuid, __entry->bucket, __entry->level, __entry->id,
431 __entry->inode, __entry->offset)
/* A btree node is being read from disk. */
434 DEFINE_EVENT(btree_node, bcache_btree_read,
435 TP_PROTO(struct cache_set *c, struct btree *b),
/* A btree node write: key type plus bytes/sectors written. */
439 TRACE_EVENT(bcache_btree_write,
440 TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
441 TP_ARGS(b, bytes, sectors),
444 __field(enum bkey_type, type)
445 __field(unsigned, bytes )
446 __field(unsigned, sectors )
450 __entry->type = btree_node_type(b);
451 __entry->bytes = bytes;
452 __entry->sectors = sectors;
455 TP_printk("bkey type %u bytes %u sectors %u",
456 __entry->type , __entry->bytes, __entry->sectors)
/* Btree node allocation, allocation failure, and free. */
459 DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
460 TP_PROTO(struct cache_set *c, struct btree *b),
464 TRACE_EVENT(bcache_btree_node_alloc_fail,
465 TP_PROTO(struct cache_set *c, enum btree_id id),
469 __array(char, uuid, 16 )
470 __field(enum btree_id, id )
474 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
478 TP_printk("%pU id %u", __entry->uuid, __entry->id)
481 DEFINE_EVENT(btree_node, bcache_btree_node_free,
482 TP_PROTO(struct cache_set *c, struct btree *b),
/* Shrinker attempted to reap a cached btree node; @ret is the result. */
486 TRACE_EVENT(bcache_mca_reap,
487 TP_PROTO(struct cache_set *c, struct btree *b, int ret),
491 __field(u64, bucket )
496 __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
500 TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
/*
 * Btree node cache shrinker pass: nodes touched/freed, how many could
 * be freed, and the shrinker's requested count.
 * NOTE(review): __entry->nr is declared and printed but its assignment
 * is not visible in this excerpt (presumably on a missing line).
 */
503 TRACE_EVENT(bcache_mca_scan,
504 TP_PROTO(struct cache_set *c, unsigned touched, unsigned freed,
505 unsigned can_free, unsigned long nr),
506 TP_ARGS(c, touched, freed, can_free, nr),
509 __array(char, uuid, 16 )
510 __field(unsigned long, touched )
511 __field(unsigned long, freed )
512 __field(unsigned long, can_free )
513 __field(unsigned long, nr )
517 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
518 __entry->touched = touched;
519 __entry->freed = freed;
520 __entry->can_free = can_free;
524 TP_printk("%pU touched %lu freed %lu can_free %lu nr %lu",
525 __entry->uuid, __entry->touched, __entry->freed,
526 __entry->can_free, __entry->nr)
/* Event class for the btree-node-cache cannibalize lock (uuid only). */
529 DECLARE_EVENT_CLASS(mca_cannibalize_lock,
530 TP_PROTO(struct cache_set *c),
534 __array(char, uuid, 16 )
538 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
541 TP_printk("%pU", __entry->uuid)
/* Cannibalize-lock contention, acquisition, actual cannibalize, unlock. */
544 DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock_fail,
545 TP_PROTO(struct cache_set *c),
549 DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock,
550 TP_PROTO(struct cache_set *c),
554 DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize,
555 TP_PROTO(struct cache_set *c),
559 DEFINE_EVENT(cache_set, bcache_mca_cannibalize_unlock,
560 TP_PROTO(struct cache_set *c),
/*
 * A key is inserted into a btree node: logs the node (bucket, level,
 * id, node key position) and the inserted key's position and size.
 * NOTE(review): excerpt is non-contiguous; several field declarations
 * (level, id, inode, size, ...) are not visible here.
 */
564 TRACE_EVENT(bcache_btree_insert_key,
565 TP_PROTO(struct cache_set *c, struct btree *b, struct bkey_i *k),
569 __field(u64, b_bucket )
570 __field(u64, b_offset )
571 __field(u64, offset )
572 __field(u32, b_inode )
580 __entry->b_bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
581 __entry->level = b->level;
582 __entry->id = b->btree_id;
583 __entry->b_inode = b->key.k.p.inode;
584 __entry->b_offset = b->key.k.p.offset;
585 __entry->inode = k->k.p.inode;
586 __entry->offset = k->k.p.offset;
587 __entry->size = k->k.size;
590 TP_printk("bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
591 __entry->b_bucket, __entry->level, __entry->id,
592 __entry->b_inode, __entry->b_offset,
593 __entry->inode, __entry->offset, __entry->size)
/*
 * Event class for btree node split/compact: the node being operated on
 * plus the number of keys involved.
 */
596 DECLARE_EVENT_CLASS(btree_split,
597 TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
601 __field(u64, bucket )
605 __field(u64, offset )
610 __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
611 __entry->level = b->level;
612 __entry->id = b->btree_id;
613 __entry->inode = b->key.k.p.inode;
614 __entry->offset = b->key.k.p.offset;
615 __entry->keys = keys;
618 TP_printk("bucket %llu(%u) id %u: %u:%llu keys %u",
619 __entry->bucket, __entry->level, __entry->id,
620 __entry->inode, __entry->offset, __entry->keys)
623 DEFINE_EVENT(btree_split, bcache_btree_node_split,
624 TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
628 DEFINE_EVENT(btree_split, bcache_btree_node_compact,
629 TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
/* A node became the root of its btree. */
633 DEFINE_EVENT(btree_node, bcache_btree_set_root,
634 TP_PROTO(struct cache_set *c, struct btree *b),
638 /* Garbage collection */
/*
 * GC coalesced @nodes adjacent btree nodes into fewer nodes; logs the
 * first node's location.
 * NOTE(review): excerpt is non-contiguous; some field declarations are
 * not visible here.
 */
640 TRACE_EVENT(bcache_btree_gc_coalesce,
641 TP_PROTO(struct cache_set *c, struct btree *b, unsigned nodes),
642 TP_ARGS(c, b, nodes),
645 __field(u64, bucket )
649 __field(u64, offset )
650 __field(unsigned, nodes )
654 __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
655 __entry->level = b->level;
656 __entry->id = b->btree_id;
657 __entry->inode = b->key.k.p.inode;
658 __entry->offset = b->key.k.p.offset;
659 __entry->nodes = nodes;
662 TP_printk("bucket %llu(%u) id %u: %u:%llu nodes %u",
663 __entry->bucket, __entry->level, __entry->id,
664 __entry->inode, __entry->offset, __entry->nodes)
/* Coalesce aborted; @reason is an internal error/status code. */
667 TRACE_EVENT(bcache_btree_gc_coalesce_fail,
668 TP_PROTO(struct cache_set *c, int reason),
673 __array(char, uuid, 16 )
677 __entry->reason = reason;
678 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
681 TP_printk("%pU: %u", __entry->uuid, __entry->reason)
/*
 * A replacement node @b was allocated for existing node @old (e.g. for
 * rewrite during GC); logs both buckets.
 */
684 TRACE_EVENT(bcache_btree_node_alloc_replacement,
685 TP_PROTO(struct cache_set *c, struct btree *old, struct btree *b),
689 __array(char, uuid, 16 )
690 __field(u64, bucket )
691 __field(u64, old_bucket )
695 __field(u64, offset )
699 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
700 __entry->old_bucket = PTR_BUCKET_NR_TRACE(c,
702 __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
703 __entry->level = b->level;
704 __entry->id = b->btree_id;
705 __entry->inode = b->key.k.p.inode;
706 __entry->offset = b->key.k.p.offset;
709 TP_printk("%pU for %llu bucket %llu(%u) id %u: %u:%llu",
710 __entry->uuid, __entry->old_bucket, __entry->bucket,
711 __entry->level, __entry->id,
712 __entry->inode, __entry->offset)
/* GC rewriting a btree node in place (and failure to do so). */
715 DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node,
716 TP_PROTO(struct cache_set *c, struct btree *b),
720 DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node_fail,
721 TP_PROTO(struct cache_set *c, struct btree *b),
/* GC pass start/end markers (whole pass and coalesce phase). */
725 DEFINE_EVENT(cache_set, bcache_gc_start,
726 TP_PROTO(struct cache_set *c),
730 DEFINE_EVENT(cache_set, bcache_gc_end,
731 TP_PROTO(struct cache_set *c),
735 DEFINE_EVENT(cache_set, bcache_gc_coalesce_start,
736 TP_PROTO(struct cache_set *c),
740 DEFINE_EVENT(cache_set, bcache_gc_coalesce_end,
741 TP_PROTO(struct cache_set *c),
/* Sector counters saturated (per device / whole set) and GC limits. */
745 DEFINE_EVENT(cache, bcache_sectors_saturated,
746 TP_PROTO(struct cache *ca),
750 DEFINE_EVENT(cache_set, bcache_gc_sectors_saturated,
751 TP_PROTO(struct cache_set *c),
755 DEFINE_EVENT(cache_set, bcache_gc_cannot_inc_gens,
756 TP_PROTO(struct cache_set *c),
760 DEFINE_EVENT(cache_set, bcache_gc_periodic,
761 TP_PROTO(struct cache_set *c),
/*
 * Bucket accounting: a pointer in key @k added/removed @sectors on
 * device @ca; @dirty distinguishes dirty vs cached sectors.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
765 TRACE_EVENT(bcache_mark_bucket,
766 TP_PROTO(struct cache *ca, const struct bkey *k,
767 const struct bch_extent_ptr *ptr,
768 int sectors, bool dirty),
769 TP_ARGS(ca, k, ptr, sectors, dirty),
772 __array(char, uuid, 16 )
774 __field(u64, offset )
775 __field(u32, sectors )
776 __field(u64, bucket )
777 __field(bool, dirty )
781 memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
782 __entry->inode = k->p.inode;
783 __entry->offset = k->p.offset;
784 __entry->sectors = sectors;
785 __entry->bucket = PTR_BUCKET_NR(ca, ptr);
786 __entry->dirty = dirty;
789 TP_printk("%pU %u:%llu sectors %i bucket %llu dirty %i",
790 __entry->uuid, __entry->inode, __entry->offset,
791 __entry->sectors, __entry->bucket, __entry->dirty)
/*
 * Allocator refilled a batch of free buckets: free vs total counts.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
796 TRACE_EVENT(bcache_alloc_batch,
797 TP_PROTO(struct cache *ca, size_t free, size_t total),
798 TP_ARGS(ca, free, total),
801 __array(char, uuid, 16 )
802 __field(size_t, free )
803 __field(size_t, total )
807 memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
808 __entry->free = free;
809 __entry->total = total;
812 TP_printk("%pU free %zu total %zu",
813 __entry->uuid, __entry->free, __entry->total)
/*
 * Could not get a btree reserve of @required buckets; @cl is the
 * closure that will be woken when space becomes available.
 * NOTE(review): __entry->cl is declared and printed but its assignment
 * is not visible in this excerpt.
 */
816 TRACE_EVENT(bcache_btree_reserve_get_fail,
817 TP_PROTO(struct cache_set *c, size_t required, struct closure *cl),
818 TP_ARGS(c, required, cl),
821 __array(char, uuid, 16 )
822 __field(size_t, required )
823 __field(struct closure *, cl )
827 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
828 __entry->required = required;
832 TP_printk("%pU required %zu by %p", __entry->uuid,
833 __entry->required, __entry->cl)
/* Prio/gen write to a cache device: start and end markers. */
836 DEFINE_EVENT(cache, bcache_prio_write_start,
837 TP_PROTO(struct cache *ca),
841 DEFINE_EVENT(cache, bcache_prio_write_end,
842 TP_PROTO(struct cache *ca),
/*
 * A bucket was invalidated so it can be reused; logs device, sector
 * count and the bucket's starting offset.
 */
846 TRACE_EVENT(bcache_invalidate,
847 TP_PROTO(struct cache *ca, size_t bucket, unsigned sectors),
848 TP_ARGS(ca, bucket, sectors),
851 __field(unsigned, sectors )
853 __field(__u64, offset )
857 __entry->dev = ca->disk_sb.bdev->bd_dev;
/* Bucket index -> offset; presumably in sectors via bucket_bits —
 * TODO confirm the unit against the bucket_bits definition. */
858 __entry->offset = bucket << ca->bucket_bits;
859 __entry->sectors = sectors;
862 TP_printk("invalidated %u sectors at %d,%d sector=%llu",
863 __entry->sectors, MAJOR(__entry->dev),
864 MINOR(__entry->dev), __entry->offset)
/* Bucket priorities were rescaled across the set. */
867 DEFINE_EVENT(cache_set, bcache_rescale_prios,
868 TP_PROTO(struct cache_set *c),
/*
 * Event class for per-device bucket allocation from a given reserve.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
872 DECLARE_EVENT_CLASS(cache_bucket_alloc,
873 TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
874 TP_ARGS(ca, reserve),
877 __array(char, uuid, 16)
878 __field(enum alloc_reserve, reserve )
882 memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
883 __entry->reserve = reserve;
886 TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
889 DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc,
890 TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
894 DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc_fail,
895 TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
/*
 * Event class for set-wide bucket allocation; @cl is the waiting
 * closure. NOTE(review): __entry->cl assignment is not visible here.
 */
899 DECLARE_EVENT_CLASS(cache_set_bucket_alloc,
900 TP_PROTO(struct cache_set *c, enum alloc_reserve reserve,
902 TP_ARGS(c, reserve, cl),
905 __array(char, uuid, 16 )
906 __field(enum alloc_reserve, reserve )
907 __field(struct closure *, cl )
911 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
912 __entry->reserve = reserve;
916 TP_printk("%pU reserve %d cl %p", __entry->uuid, __entry->reserve,
/* Freelist was empty for the requested reserve. */
920 DEFINE_EVENT(cache_set_bucket_alloc, bcache_freelist_empty_fail,
921 TP_PROTO(struct cache_set *c, enum alloc_reserve reserve,
923 TP_ARGS(c, reserve, cl)
/* Event class for open-bucket (write point) allocation. */
926 DECLARE_EVENT_CLASS(open_bucket_alloc,
927 TP_PROTO(struct cache_set *c, struct closure *cl),
931 __array(char, uuid, 16 )
932 __field(struct closure *, cl )
936 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
940 TP_printk("%pU cl %p",
941 __entry->uuid, __entry->cl)
944 DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc,
945 TP_PROTO(struct cache_set *c, struct closure *cl),
949 DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc_fail,
950 TP_PROTO(struct cache_set *c, struct closure *cl),
/*
 * A key scan over the range [start_inode:start_offset,
 * end_inode:end_offset] found @nr_found keys.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
956 TRACE_EVENT(bcache_keyscan,
957 TP_PROTO(unsigned nr_found,
958 unsigned start_inode, u64 start_offset,
959 unsigned end_inode, u64 end_offset),
961 start_inode, start_offset,
962 end_inode, end_offset),
965 __field(__u32, nr_found )
966 __field(__u32, start_inode )
967 __field(__u64, start_offset )
968 __field(__u32, end_inode )
969 __field(__u64, end_offset )
973 __entry->nr_found = nr_found;
974 __entry->start_inode = start_inode;
975 __entry->start_offset = start_offset;
976 __entry->end_inode = end_inode;
977 __entry->end_offset = end_offset;
980 TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
981 __entry->start_inode, __entry->start_offset,
982 __entry->end_inode, __entry->end_offset)
/*
 * Event class for data-move I/O: position and size of the key being
 * moved. NOTE(review): excerpt is non-contiguous; some macro lines are
 * missing.
 */
987 DECLARE_EVENT_CLASS(moving_io,
988 TP_PROTO(struct bkey *k),
992 __field(__u32, inode )
993 __field(__u64, offset )
994 __field(__u32, sectors )
998 __entry->inode = k->p.inode;
999 __entry->offset = k->p.offset;
1000 __entry->sectors = k->size;
1003 TP_printk("%u:%llu sectors %u",
1004 __entry->inode, __entry->offset, __entry->sectors)
/* Stages of a single data move: read, read done, write, write done. */
1007 DEFINE_EVENT(moving_io, bcache_move_read,
1008 TP_PROTO(struct bkey *k),
1012 DEFINE_EVENT(moving_io, bcache_move_read_done,
1013 TP_PROTO(struct bkey *k),
1017 DEFINE_EVENT(moving_io, bcache_move_write,
1018 TP_PROTO(struct bkey *k),
1022 DEFINE_EVENT(moving_io, bcache_move_write_done,
1023 TP_PROTO(struct bkey *k),
/* The destination key raced with another write; move abandoned. */
1027 DEFINE_EVENT(moving_io, bcache_copy_collision,
1028 TP_PROTO(struct bkey *k),
/* Moving GC: allocation failure, start, and end-of-pass statistics. */
1034 DEFINE_EVENT(page_alloc_fail, bcache_moving_gc_alloc_fail,
1035 TP_PROTO(struct cache_set *c, u64 size),
1039 DEFINE_EVENT(cache, bcache_moving_gc_start,
1040 TP_PROTO(struct cache *ca),
1044 TRACE_EVENT(bcache_moving_gc_end,
1045 TP_PROTO(struct cache *ca, u64 sectors_moved, u64 keys_moved,
1047 TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),
1050 __array(char, uuid, 16 )
1051 __field(u64, sectors_moved )
1052 __field(u64, keys_moved )
1053 __field(u64, buckets_moved )
1057 memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
1058 __entry->sectors_moved = sectors_moved;
1059 __entry->keys_moved = keys_moved;
1060 __entry->buckets_moved = buckets_moved;
1063 TP_printk("%pU sectors_moved %llu keys_moved %llu buckets_moved %llu",
1064 __entry->uuid, __entry->sectors_moved, __entry->keys_moved,
1065 __entry->buckets_moved)
/* Moving GC found no reserve space / no work to do. */
1068 DEFINE_EVENT(cache, bcache_moving_gc_reserve_empty,
1069 TP_PROTO(struct cache *ca),
1073 DEFINE_EVENT(cache, bcache_moving_gc_no_work,
1074 TP_PROTO(struct cache *ca),
/* A key's data is being copied by GC. */
1078 DEFINE_EVENT(bkey, bcache_gc_copy,
1079 TP_PROTO(const struct bkey *k),
/* Tiering (moving data between cache tiers): refill phase markers. */
1085 DEFINE_EVENT(cache_set, bcache_tiering_refill_start,
1086 TP_PROTO(struct cache_set *c),
1090 DEFINE_EVENT(cache_set, bcache_tiering_refill_end,
1091 TP_PROTO(struct cache_set *c),
1095 DEFINE_EVENT(page_alloc_fail, bcache_tiering_alloc_fail,
1096 TP_PROTO(struct cache_set *c, u64 size),
1100 DEFINE_EVENT(cache_set, bcache_tiering_start,
1101 TP_PROTO(struct cache_set *c),
/*
 * End of a tiering pass: totals of sectors and keys moved.
 * NOTE(review): excerpt is non-contiguous; some macro lines are missing.
 */
1105 TRACE_EVENT(bcache_tiering_end,
1106 TP_PROTO(struct cache_set *c, u64 sectors_moved,
1108 TP_ARGS(c, sectors_moved, keys_moved),
1111 __array(char, uuid, 16 )
1112 __field(u64, sectors_moved )
1113 __field(u64, keys_moved )
1117 memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
1118 __entry->sectors_moved = sectors_moved;
1119 __entry->keys_moved = keys_moved;
1122 TP_printk("%pU sectors_moved %llu keys_moved %llu",
1123 __entry->uuid, __entry->sectors_moved, __entry->keys_moved)
/* A key's data is being copied between tiers. */
1126 DEFINE_EVENT(bkey, bcache_tiering_copy,
1127 TP_PROTO(const struct bkey *k),
1131 /* Background writeback */
/* A dirty key is being written back; collision = key changed meanwhile. */
1133 DEFINE_EVENT(bkey, bcache_writeback,
1134 TP_PROTO(const struct bkey *k),
1138 DEFINE_EVENT(bkey, bcache_writeback_collision,
1139 TP_PROTO(const struct bkey *k),
/*
 * Writeback I/O error: the key, whether the failing I/O was the write
 * (vs the read), and the error code.
 * NOTE(review): excerpt is non-contiguous; some macro lines (including
 * the size field declaration and the error argument of TP_printk) are
 * not visible here.
 */
1143 TRACE_EVENT(bcache_writeback_error,
1144 TP_PROTO(struct bkey *k, bool write, int error),
1145 TP_ARGS(k, write, error),
1149 __field(u32, inode )
1150 __field(u64, offset )
1151 __field(bool, write )
1152 __field(int, error )
1156 __entry->inode = k->p.inode;
1157 __entry->offset = k->p.offset;
1158 __entry->size = k->size;
1159 __entry->write = write;
1160 __entry->error = error;
1163 TP_printk("%u:%llu len %u %s error %d", __entry->inode,
1164 __entry->offset, __entry->size,
1165 __entry->write ? "write" : "read",
/* Could not allocate pages for writeback. */
1169 DEFINE_EVENT(page_alloc_fail, bcache_writeback_alloc_fail,
1170 TP_PROTO(struct cache_set *c, u64 size),
1174 #endif /* _TRACE_BCACHE_H */
1176 /* This part must be outside protection */
1177 #include <trace/define_trace.h>