1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHE_H
7
8 #include <linux/tracepoint.h>
9
10 #define TRACE_BPOS_entries(name)                                \
11         __field(u64,                    name##_inode    )       \
12         __field(u64,                    name##_offset   )       \
13         __field(u32,                    name##_snapshot )
14
15 #define TRACE_BPOS_assign(dst, src)                             \
16         __entry->dst##_inode            = (src).inode;          \
17         __entry->dst##_offset           = (src).offset;         \
18         __entry->dst##_snapshot         = (src).snapshot
19
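/*
 * The two helpers above embed a struct bpos in a tracepoint: TRACE_BPOS_entries()
 * declares <name>_inode/<name>_offset/<name>_snapshot fields inside
 * TP_STRUCT__entry(), and TRACE_BPOS_assign() copies a bpos into them from
 * TP_fast_assign().  For example, TRACE_BPOS_entries(pos) expands to:
 *
 *	__field(u64, pos_inode)
 *	__field(u64, pos_offset)
 *	__field(u32, pos_snapshot)
 */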
20 DECLARE_EVENT_CLASS(bpos,
21         TP_PROTO(const struct bpos *p),
22         TP_ARGS(p),
23
24         TP_STRUCT__entry(
25                 TRACE_BPOS_entries(p)
26         ),
27
28         TP_fast_assign(
29                 TRACE_BPOS_assign(p, *p);
30         ),
31
32         TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
33 );
34
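/*
 * Most events in this file are declared as classes (DECLARE_EVENT_CLASS) and
 * then instantiated with DEFINE_EVENT, so several tracepoints can share one
 * entry layout.  As a rough usage sketch (the call site is illustrative, not
 * copied from the bcachefs sources): DEFINE_EVENT(bpos, bkey_pack_pos_fail, ...)
 * further down generates a trace_bkey_pack_pos_fail() helper that a caller
 * would invoke as
 *
 *	trace_bkey_pack_pos_fail(&pos);
 *
 * where pos is the struct bpos that failed to pack.
 */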
35 DECLARE_EVENT_CLASS(bkey,
36         TP_PROTO(const struct bkey *k),
37         TP_ARGS(k),
38
39         TP_STRUCT__entry(
40                 __field(u64,    inode                           )
41                 __field(u64,    offset                          )
42                 __field(u32,    size                            )
43         ),
44
45         TP_fast_assign(
46                 __entry->inode  = k->p.inode;
47                 __entry->offset = k->p.offset;
48                 __entry->size   = k->size;
49         ),
50
51         TP_printk("%llu:%llu len %u", __entry->inode,
52                   __entry->offset, __entry->size)
53 );
54
55 DECLARE_EVENT_CLASS(btree_node,
56         TP_PROTO(struct bch_fs *c, struct btree *b),
57         TP_ARGS(c, b),
58
59         TP_STRUCT__entry(
60                 __field(dev_t,          dev                     )
61                 __field(u8,             level                   )
62                 __field(u8,             btree_id                )
63                 TRACE_BPOS_entries(pos)
64         ),
65
66         TP_fast_assign(
67                 __entry->dev            = c->dev;
68                 __entry->level          = b->c.level;
69                 __entry->btree_id       = b->c.btree_id;
70                 TRACE_BPOS_assign(pos, b->key.k.p);
71         ),
72
73         TP_printk("%d,%d %u %s %llu:%llu:%u",
74                   MAJOR(__entry->dev), MINOR(__entry->dev),
75                   __entry->level,
76                   bch2_btree_ids[__entry->btree_id],
77                   __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
78 );
79
80 DECLARE_EVENT_CLASS(bch_fs,
81         TP_PROTO(struct bch_fs *c),
82         TP_ARGS(c),
83
84         TP_STRUCT__entry(
85                 __field(dev_t,          dev                     )
86         ),
87
88         TP_fast_assign(
89                 __entry->dev            = c->dev;
90         ),
91
92         TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
93 );
94
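/*
 * Common fields for bio-based events (read_promote, journal_write, ...):
 * bio_dev() is only consulted when the bio actually has a block device
 * attached, nr_sector is reported in 512-byte sectors, and the 6-byte rwbs
 * buffer is filled by blk_fill_rwbs() with the usual read/write/sync/flush
 * flag characters.
 */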
95 DECLARE_EVENT_CLASS(bio,
96         TP_PROTO(struct bio *bio),
97         TP_ARGS(bio),
98
99         TP_STRUCT__entry(
100                 __field(dev_t,          dev                     )
101                 __field(sector_t,       sector                  )
102                 __field(unsigned int,   nr_sector               )
103                 __array(char,           rwbs,   6               )
104         ),
105
106         TP_fast_assign(
107                 __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
108                 __entry->sector         = bio->bi_iter.bi_sector;
109                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
110                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
111         ),
112
113         TP_printk("%d,%d  %s %llu + %u",
114                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
115                   (unsigned long long)__entry->sector, __entry->nr_sector)
116 );
117
118 /* super-io.c: */
119 TRACE_EVENT(write_super,
120         TP_PROTO(struct bch_fs *c, unsigned long ip),
121         TP_ARGS(c, ip),
122
123         TP_STRUCT__entry(
124                 __field(dev_t,          dev     )
125                 __field(unsigned long,  ip      )
126         ),
127
128         TP_fast_assign(
129                 __entry->dev            = c->dev;
130                 __entry->ip             = ip;
131         ),
132
133         TP_printk("%d,%d for %pS",
134                   MAJOR(__entry->dev), MINOR(__entry->dev),
135                   (void *) __entry->ip)
136 );
137
138 /* io.c: */
139
140 DEFINE_EVENT(bio, read_promote,
141         TP_PROTO(struct bio *bio),
142         TP_ARGS(bio)
143 );
144
145 DEFINE_EVENT(bio, read_bounce,
146         TP_PROTO(struct bio *bio),
147         TP_ARGS(bio)
148 );
149
150 DEFINE_EVENT(bio, read_split,
151         TP_PROTO(struct bio *bio),
152         TP_ARGS(bio)
153 );
154
155 DEFINE_EVENT(bio, read_retry,
156         TP_PROTO(struct bio *bio),
157         TP_ARGS(bio)
158 );
159
160 DEFINE_EVENT(bio, read_reuse_race,
161         TP_PROTO(struct bio *bio),
162         TP_ARGS(bio)
163 );
164
165 /* Journal */
166
167 DEFINE_EVENT(bch_fs, journal_full,
168         TP_PROTO(struct bch_fs *c),
169         TP_ARGS(c)
170 );
171
172 DEFINE_EVENT(bch_fs, journal_entry_full,
173         TP_PROTO(struct bch_fs *c),
174         TP_ARGS(c)
175 );
176
177 DEFINE_EVENT(bio, journal_write,
178         TP_PROTO(struct bio *bio),
179         TP_ARGS(bio)
180 );
181
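/*
 * journal_reclaim_start/journal_reclaim_finish bracket one pass of journal
 * reclaim.  Roughly: min_nr and min_key_cache are how much this pass was asked
 * to flush, and the dirty/total pairs report how much of the btree node cache
 * and the btree key cache is currently pinning journal space.
 */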
182 TRACE_EVENT(journal_reclaim_start,
183         TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
184                  u64 min_nr, u64 min_key_cache,
185                  u64 prereserved, u64 prereserved_total,
186                  u64 btree_cache_dirty, u64 btree_cache_total,
187                  u64 btree_key_cache_dirty, u64 btree_key_cache_total),
188         TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
189                 btree_cache_dirty, btree_cache_total,
190                 btree_key_cache_dirty, btree_key_cache_total),
191
192         TP_STRUCT__entry(
193                 __field(dev_t,          dev                     )
194                 __field(bool,           direct                  )
195                 __field(bool,           kicked                  )
196                 __field(u64,            min_nr                  )
197                 __field(u64,            min_key_cache           )
198                 __field(u64,            prereserved             )
199                 __field(u64,            prereserved_total       )
200                 __field(u64,            btree_cache_dirty       )
201                 __field(u64,            btree_cache_total       )
202                 __field(u64,            btree_key_cache_dirty   )
203                 __field(u64,            btree_key_cache_total   )
204         ),
205
206         TP_fast_assign(
207                 __entry->dev                    = c->dev;
208                 __entry->direct                 = direct;
209                 __entry->kicked                 = kicked;
210                 __entry->min_nr                 = min_nr;
211                 __entry->min_key_cache          = min_key_cache;
212                 __entry->prereserved            = prereserved;
213                 __entry->prereserved_total      = prereserved_total;
214                 __entry->btree_cache_dirty      = btree_cache_dirty;
215                 __entry->btree_cache_total      = btree_cache_total;
216                 __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
217                 __entry->btree_key_cache_total  = btree_key_cache_total;
218         ),
219
220         TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
221                   MAJOR(__entry->dev), MINOR(__entry->dev),
222                   __entry->direct,
223                   __entry->kicked,
224                   __entry->min_nr,
225                   __entry->min_key_cache,
226                   __entry->prereserved,
227                   __entry->prereserved_total,
228                   __entry->btree_cache_dirty,
229                   __entry->btree_cache_total,
230                   __entry->btree_key_cache_dirty,
231                   __entry->btree_key_cache_total)
232 );
233
234 TRACE_EVENT(journal_reclaim_finish,
235         TP_PROTO(struct bch_fs *c, u64 nr_flushed),
236         TP_ARGS(c, nr_flushed),
237
238         TP_STRUCT__entry(
239                 __field(dev_t,          dev                     )
240                 __field(u64,            nr_flushed              )
241         ),
242
243         TP_fast_assign(
244                 __entry->dev            = c->dev;
245                 __entry->nr_flushed     = nr_flushed;
246         ),
247
248         TP_printk("%d,%d flushed %llu",
249                   MAJOR(__entry->dev), MINOR(__entry->dev),
250                   __entry->nr_flushed)
251 );
252
253 /* bset.c: */
254
255 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
256         TP_PROTO(const struct bpos *p),
257         TP_ARGS(p)
258 );
259
260 /* Btree cache: */
261
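/*
 * Emitted by the btree node cache shrinker: nr_to_scan is what the shrinker
 * core requested, can_free is an estimate of how many cached nodes looked
 * reclaimable, and ret is the value handed back to the shrinker core.
 */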
262 TRACE_EVENT(btree_cache_scan,
263         TP_PROTO(long nr_to_scan, long can_free, long ret),
264         TP_ARGS(nr_to_scan, can_free, ret),
265
266         TP_STRUCT__entry(
267                 __field(long,   nr_to_scan              )
268                 __field(long,   can_free                )
269                 __field(long,   ret                     )
270         ),
271
272         TP_fast_assign(
273                 __entry->nr_to_scan     = nr_to_scan;
274                 __entry->can_free       = can_free;
275                 __entry->ret            = ret;
276         ),
277
278         TP_printk("scanned for %li nodes, can free %li, ret %li",
279                   __entry->nr_to_scan, __entry->can_free, __entry->ret)
280 );
281
282 DEFINE_EVENT(btree_node, btree_cache_reap,
283         TP_PROTO(struct bch_fs *c, struct btree *b),
284         TP_ARGS(c, b)
285 );
286
287 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
288         TP_PROTO(struct bch_fs *c),
289         TP_ARGS(c)
290 );
291
292 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
293         TP_PROTO(struct bch_fs *c),
294         TP_ARGS(c)
295 );
296
297 DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
298         TP_PROTO(struct bch_fs *c),
299         TP_ARGS(c)
300 );
301
302 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
303         TP_PROTO(struct bch_fs *c),
304         TP_ARGS(c)
305 );
306
307 /* Btree */
308
309 DEFINE_EVENT(btree_node, btree_node_read,
310         TP_PROTO(struct bch_fs *c, struct btree *b),
311         TP_ARGS(c, b)
312 );
313
314 TRACE_EVENT(btree_node_write,
315         TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
316         TP_ARGS(b, bytes, sectors),
317
318         TP_STRUCT__entry(
319                 __field(enum btree_node_type,   type)
320                 __field(unsigned,       bytes                   )
321                 __field(unsigned,       sectors                 )
322         ),
323
324         TP_fast_assign(
325                 __entry->type   = btree_node_type(b);
326                 __entry->bytes  = bytes;
327                 __entry->sectors = sectors;
328         ),
329
330         TP_printk("bkey type %u bytes %u sectors %u",
331                   __entry->type, __entry->bytes, __entry->sectors)
332 );
333
334 DEFINE_EVENT(btree_node, btree_node_alloc,
335         TP_PROTO(struct bch_fs *c, struct btree *b),
336         TP_ARGS(c, b)
337 );
338
339 DEFINE_EVENT(btree_node, btree_node_free,
340         TP_PROTO(struct bch_fs *c, struct btree *b),
341         TP_ARGS(c, b)
342 );
343
344 TRACE_EVENT(btree_reserve_get_fail,
345         TP_PROTO(const char *trans_fn,
346                  unsigned long caller_ip,
347                  size_t required),
348         TP_ARGS(trans_fn, caller_ip, required),
349
350         TP_STRUCT__entry(
351                 __array(char,                   trans_fn, 32    )
352                 __field(unsigned long,          caller_ip       )
353                 __field(size_t,                 required        )
354         ),
355
356         TP_fast_assign(
357                 strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
358                 __entry->caller_ip      = caller_ip;
359                 __entry->required       = required;
360         ),
361
362         TP_printk("%s %pS required %zu",
363                   __entry->trans_fn,
364                   (void *) __entry->caller_ip,
365                   __entry->required)
366 );
367
368 DEFINE_EVENT(btree_node, btree_node_compact,
369         TP_PROTO(struct bch_fs *c, struct btree *b),
370         TP_ARGS(c, b)
371 );
372
373 DEFINE_EVENT(btree_node, btree_node_merge,
374         TP_PROTO(struct bch_fs *c, struct btree *b),
375         TP_ARGS(c, b)
376 );
377
378 DEFINE_EVENT(btree_node, btree_node_split,
379         TP_PROTO(struct bch_fs *c, struct btree *b),
380         TP_ARGS(c, b)
381 );
382
383 DEFINE_EVENT(btree_node, btree_node_rewrite,
384         TP_PROTO(struct bch_fs *c, struct btree *b),
385         TP_ARGS(c, b)
386 );
387
388 DEFINE_EVENT(btree_node, btree_node_set_root,
389         TP_PROTO(struct bch_fs *c, struct btree *b),
390         TP_ARGS(c, b)
391 );
392
393 TRACE_EVENT(btree_path_relock_fail,
394         TP_PROTO(struct btree_trans *trans,
395                  unsigned long caller_ip,
396                  struct btree_path *path,
397                  unsigned level),
398         TP_ARGS(trans, caller_ip, path, level),
399
400         TP_STRUCT__entry(
401                 __array(char,                   trans_fn, 32    )
402                 __field(unsigned long,          caller_ip       )
403                 __field(u8,                     btree_id        )
404                 __field(u8,                     level           )
405                 TRACE_BPOS_entries(pos)
406                 __array(char,                   node, 24        )
407                 __field(u32,                    iter_lock_seq   )
408                 __field(u32,                    node_lock_seq   )
409         ),
410
411         TP_fast_assign(
412                 struct btree *b = btree_path_node(path, level);
413
414                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
415                 __entry->caller_ip              = caller_ip;
416                 __entry->btree_id               = path->btree_id;
417                 __entry->level                  = path->level;
418                 TRACE_BPOS_assign(pos, path->pos);
419                 if (IS_ERR(b))
420                         strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
421                 else
422                         scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
423                 __entry->iter_lock_seq          = path->l[level].lock_seq;
424                 __entry->node_lock_seq          = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
425         ),
426
427         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
428                   __entry->trans_fn,
429                   (void *) __entry->caller_ip,
430                   bch2_btree_ids[__entry->btree_id],
431                   __entry->pos_inode,
432                   __entry->pos_offset,
433                   __entry->pos_snapshot,
434                   __entry->level,
435                   __entry->node,
436                   __entry->iter_lock_seq,
437                   __entry->node_lock_seq)
438 );
439
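/*
 * Fired when upgrading a btree_path's lock (read -> intent) fails.  The
 * "held" counts come from bch2_btree_node_lock_counts() and cover only locks
 * held by this transaction's own paths, while the "lock count" pair comes
 * from six_lock_counts() and covers all read/intent holders on the node.
 */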
440 TRACE_EVENT(btree_path_upgrade_fail,
441         TP_PROTO(struct btree_trans *trans,
442                  unsigned long caller_ip,
443                  struct btree_path *path,
444                  unsigned level),
445         TP_ARGS(trans, caller_ip, path, level),
446
447         TP_STRUCT__entry(
448                 __array(char,                   trans_fn, 32    )
449                 __field(unsigned long,          caller_ip       )
450                 __field(u8,                     btree_id        )
451                 __field(u8,                     level           )
452                 TRACE_BPOS_entries(pos)
453                 __field(u8,                     locked          )
454                 __field(u8,                     self_read_count )
455                 __field(u8,                     self_intent_count)
456                 __field(u8,                     read_count      )
457                 __field(u8,                     intent_count    )
458                 __field(u32,                    iter_lock_seq   )
459                 __field(u32,                    node_lock_seq   )
460         ),
461
462         TP_fast_assign(
463                 struct six_lock_count c;
464
465                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
466                 __entry->caller_ip              = caller_ip;
467                 __entry->btree_id               = path->btree_id;
468                 __entry->level                  = level;
469                 TRACE_BPOS_assign(pos, path->pos);
470                 __entry->locked                 = btree_node_locked(path, level);
471
472                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
473                 __entry->self_read_count        = c.n[SIX_LOCK_read];
474                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
475                 c = six_lock_counts(&path->l[level].b->c.lock);
476                 __entry->read_count             = c.n[SIX_LOCK_read];
477                 __entry->intent_count           = c.n[SIX_LOCK_intent];
478                 __entry->iter_lock_seq          = path->l[level].lock_seq;
479                 __entry->node_lock_seq          = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
480         ),
481
482         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
483                   __entry->trans_fn,
484                   (void *) __entry->caller_ip,
485                   bch2_btree_ids[__entry->btree_id],
486                   __entry->pos_inode,
487                   __entry->pos_offset,
488                   __entry->pos_snapshot,
489                   __entry->level,
490                   __entry->locked,
491                   __entry->self_read_count,
492                   __entry->self_intent_count,
493                   __entry->read_count,
494                   __entry->intent_count,
495                   __entry->iter_lock_seq,
496                   __entry->node_lock_seq)
497 );
498
499 /* Garbage collection */
500
501 DEFINE_EVENT(bch_fs, gc_gens_start,
502         TP_PROTO(struct bch_fs *c),
503         TP_ARGS(c)
504 );
505
506 DEFINE_EVENT(bch_fs, gc_gens_end,
507         TP_PROTO(struct bch_fs *c),
508         TP_ARGS(c)
509 );
510
511 /* Allocator */
512
513 TRACE_EVENT(bucket_alloc,
514         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
515                  bool user, u64 bucket),
516         TP_ARGS(ca, alloc_reserve, user, bucket),
517
518         TP_STRUCT__entry(
519                 __field(dev_t,                  dev     )
520                 __array(char,   reserve,        16      )
521                 __field(bool,                   user    )
522                 __field(u64,                    bucket  )
523         ),
524
525         TP_fast_assign(
526                 __entry->dev            = ca->dev;
527                 strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
528                 __entry->user           = user;
529                 __entry->bucket         = bucket;
530         ),
531
532         TP_printk("%d,%d reserve %s user %u bucket %llu",
533                   MAJOR(__entry->dev), MINOR(__entry->dev),
534                   __entry->reserve,
535                   __entry->user,
536                   __entry->bucket)
537 );
538
539 TRACE_EVENT(bucket_alloc_fail,
540         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
541                  u64 free,
542                  u64 avail,
543                  u64 copygc_wait_amount,
544                  s64 copygc_waiting_for,
545                  u64 seen,
546                  u64 open,
547                  u64 need_journal_commit,
548                  u64 nouse,
549                  bool nonblocking,
550                  const char *err),
551         TP_ARGS(ca, alloc_reserve, free, avail, copygc_wait_amount, copygc_waiting_for,
552                 seen, open, need_journal_commit, nouse, nonblocking, err),
553
554         TP_STRUCT__entry(
555                 __field(dev_t,                  dev                     )
556                 __array(char,   reserve,        16                      )
557                 __field(u64,                    free                    )
558                 __field(u64,                    avail                   )
559                 __field(u64,                    copygc_wait_amount      )
560                 __field(s64,                    copygc_waiting_for      )
561                 __field(u64,                    seen                    )
562                 __field(u64,                    open                    )
563                 __field(u64,                    need_journal_commit     )
564                 __field(u64,                    nouse                   )
565                 __field(bool,                   nonblocking             )
566                 __array(char,                   err,    32              )
567         ),
568
569         TP_fast_assign(
570                 __entry->dev            = ca->dev;
571                 strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
572                 __entry->free           = free;
573                 __entry->avail          = avail;
574                 __entry->copygc_wait_amount     = copygc_wait_amount;
575                 __entry->copygc_waiting_for     = copygc_waiting_for;
576                 __entry->seen           = seen;
577                 __entry->open           = open;
578                 __entry->need_journal_commit = need_journal_commit;
579                 __entry->nouse          = nouse;
580                 __entry->nonblocking    = nonblocking;
581                 strlcpy(__entry->err, err, sizeof(__entry->err));
582         ),
583
584         TP_printk("%d,%d reserve %s free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
585                   MAJOR(__entry->dev), MINOR(__entry->dev),
586                   __entry->reserve,
587                   __entry->free,
588                   __entry->avail,
589                   __entry->copygc_wait_amount,
590                   __entry->copygc_waiting_for,
591                   __entry->seen,
592                   __entry->open,
593                   __entry->need_journal_commit,
594                   __entry->nouse,
595                   __entry->nonblocking,
596                   __entry->err)
597 );
598
599 TRACE_EVENT(discard_buckets,
600         TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
601                  u64 need_journal_commit, u64 discarded, const char *err),
602         TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
603
604         TP_STRUCT__entry(
605                 __field(dev_t,          dev                     )
606                 __field(u64,            seen                    )
607                 __field(u64,            open                    )
608                 __field(u64,            need_journal_commit     )
609                 __field(u64,            discarded               )
610                 __array(char,           err,    16              )
611         ),
612
613         TP_fast_assign(
614                 __entry->dev                    = c->dev;
615                 __entry->seen                   = seen;
616                 __entry->open                   = open;
617                 __entry->need_journal_commit    = need_journal_commit;
618                 __entry->discarded              = discarded;
619                 strlcpy(__entry->err, err, sizeof(__entry->err));
620         ),
621
622         TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
623                   MAJOR(__entry->dev), MINOR(__entry->dev),
624                   __entry->seen,
625                   __entry->open,
626                   __entry->need_journal_commit,
627                   __entry->discarded,
628                   __entry->err)
629 );
630
631 TRACE_EVENT(bucket_invalidate,
632         TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
633         TP_ARGS(c, dev, bucket, sectors),
634
635         TP_STRUCT__entry(
636                 __field(dev_t,          dev                     )
637                 __field(u32,            dev_idx                 )
638                 __field(u32,            sectors                 )
639                 __field(u64,            bucket                  )
640         ),
641
642         TP_fast_assign(
643                 __entry->dev            = c->dev;
644                 __entry->dev_idx        = dev;
645                 __entry->sectors        = sectors;
646                 __entry->bucket         = bucket;
647         ),
648
649         TP_printk("%d,%d invalidated %u:%llu cached sectors %u",
650                   MAJOR(__entry->dev), MINOR(__entry->dev),
651                   __entry->dev_idx, __entry->bucket,
652                   __entry->sectors)
653 );
654
655 /* Moving IO */
656
657 DEFINE_EVENT(bkey, move_extent_read,
658         TP_PROTO(const struct bkey *k),
659         TP_ARGS(k)
660 );
661
662 DEFINE_EVENT(bkey, move_extent_write,
663         TP_PROTO(const struct bkey *k),
664         TP_ARGS(k)
665 );
666
667 DEFINE_EVENT(bkey, move_extent_finish,
668         TP_PROTO(const struct bkey *k),
669         TP_ARGS(k)
670 );
671
672 DEFINE_EVENT(bkey, move_extent_race,
673         TP_PROTO(const struct bkey *k),
674         TP_ARGS(k)
675 );
676
677 DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
678         TP_PROTO(const struct bkey *k),
679         TP_ARGS(k)
680 );
681
682 TRACE_EVENT(move_data,
683         TP_PROTO(struct bch_fs *c, u64 sectors_moved,
684                  u64 keys_moved),
685         TP_ARGS(c, sectors_moved, keys_moved),
686
687         TP_STRUCT__entry(
688                 __field(dev_t,          dev                     )
689                 __field(u64,            sectors_moved   )
690                 __field(u64,            keys_moved      )
691         ),
692
693         TP_fast_assign(
694                 __entry->dev                    = c->dev;
695                 __entry->sectors_moved = sectors_moved;
696                 __entry->keys_moved = keys_moved;
697         ),
698
699         TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
700                   MAJOR(__entry->dev), MINOR(__entry->dev),
701                   __entry->sectors_moved, __entry->keys_moved)
702 );
703
704 TRACE_EVENT(copygc,
705         TP_PROTO(struct bch_fs *c,
706                  u64 sectors_moved, u64 sectors_not_moved,
707                  u64 buckets_moved, u64 buckets_not_moved),
708         TP_ARGS(c,
709                 sectors_moved, sectors_not_moved,
710                 buckets_moved, buckets_not_moved),
711
712         TP_STRUCT__entry(
713                 __field(dev_t,          dev                     )
714                 __field(u64,            sectors_moved           )
715                 __field(u64,            sectors_not_moved       )
716                 __field(u64,            buckets_moved           )
717                 __field(u64,            buckets_not_moved       )
718         ),
719
720         TP_fast_assign(
721                 __entry->dev                    = c->dev;
722                 __entry->sectors_moved          = sectors_moved;
723                 __entry->sectors_not_moved      = sectors_not_moved;
724                 __entry->buckets_moved          = buckets_moved;
725                 __entry->buckets_not_moved      = buckets_not_moved;
726         ),
727
728         TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
729                   MAJOR(__entry->dev), MINOR(__entry->dev),
730                   __entry->sectors_moved, __entry->sectors_not_moved,
731                   __entry->buckets_moved, __entry->buckets_not_moved)
732 );
733
734 TRACE_EVENT(copygc_wait,
735         TP_PROTO(struct bch_fs *c,
736                  u64 wait_amount, u64 until),
737         TP_ARGS(c, wait_amount, until),
738
739         TP_STRUCT__entry(
740                 __field(dev_t,          dev                     )
741                 __field(u64,            wait_amount             )
742                 __field(u64,            until                   )
743         ),
744
745         TP_fast_assign(
746                 __entry->dev            = c->dev;
747                 __entry->wait_amount    = wait_amount;
748                 __entry->until          = until;
749         ),
750
751         TP_printk("%d,%d waiting for %llu sectors until %llu",
752                   MAJOR(__entry->dev), MINOR(__entry->dev),
753                   __entry->wait_amount, __entry->until)
754 );
755
756 /* btree transactions: */
757
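/*
 * transaction_event is the base class for most of the trans_restart_*
 * tracepoints below; each records only the transaction's function name and the
 * caller ip.  A minimal usage sketch (the call site shown is illustrative, not
 * lifted from the bcachefs sources): DEFINE_EVENT(transaction_event,
 * transaction_commit, ...) generates a helper that would be called as
 *
 *	trace_transaction_commit(trans, _RET_IP_);
 */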
758 DECLARE_EVENT_CLASS(transaction_event,
759         TP_PROTO(struct btree_trans *trans,
760                  unsigned long caller_ip),
761         TP_ARGS(trans, caller_ip),
762
763         TP_STRUCT__entry(
764                 __array(char,                   trans_fn, 32    )
765                 __field(unsigned long,          caller_ip       )
766         ),
767
768         TP_fast_assign(
769                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
770                 __entry->caller_ip              = caller_ip;
771         ),
772
773         TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
774 );
775
776 DEFINE_EVENT(transaction_event, transaction_commit,
777         TP_PROTO(struct btree_trans *trans,
778                  unsigned long caller_ip),
779         TP_ARGS(trans, caller_ip)
780 );
781
782 DEFINE_EVENT(transaction_event, trans_restart_injected,
783         TP_PROTO(struct btree_trans *trans,
784                  unsigned long caller_ip),
785         TP_ARGS(trans, caller_ip)
786 );
787
788 DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
789         TP_PROTO(struct btree_trans *trans,
790                  unsigned long caller_ip),
791         TP_ARGS(trans, caller_ip)
792 );
793
794 DEFINE_EVENT(transaction_event, trans_restart_journal_res_get,
795         TP_PROTO(struct btree_trans *trans,
796                  unsigned long caller_ip),
797         TP_ARGS(trans, caller_ip)
798 );
799
800
801 TRACE_EVENT(trans_restart_journal_preres_get,
802         TP_PROTO(struct btree_trans *trans,
803                  unsigned long caller_ip,
804                  unsigned flags),
805         TP_ARGS(trans, caller_ip, flags),
806
807         TP_STRUCT__entry(
808                 __array(char,                   trans_fn, 32    )
809                 __field(unsigned long,          caller_ip       )
810                 __field(unsigned,               flags           )
811         ),
812
813         TP_fast_assign(
814                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
815                 __entry->caller_ip              = caller_ip;
816                 __entry->flags                  = flags;
817         ),
818
819         TP_printk("%s %pS %x", __entry->trans_fn,
820                   (void *) __entry->caller_ip,
821                   __entry->flags)
822 );
823
824 DEFINE_EVENT(transaction_event, trans_restart_journal_reclaim,
825         TP_PROTO(struct btree_trans *trans,
826                  unsigned long caller_ip),
827         TP_ARGS(trans, caller_ip)
828 );
829
830 DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
831         TP_PROTO(struct btree_trans *trans,
832                  unsigned long caller_ip),
833         TP_ARGS(trans, caller_ip)
834 );
835
836 DEFINE_EVENT(transaction_event, trans_traverse_all,
837         TP_PROTO(struct btree_trans *trans,
838                  unsigned long caller_ip),
839         TP_ARGS(trans, caller_ip)
840 );
841
842 DEFINE_EVENT(transaction_event, trans_restart_mark_replicas,
843         TP_PROTO(struct btree_trans *trans,
844                  unsigned long caller_ip),
845         TP_ARGS(trans, caller_ip)
846 );
847
848 DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
849         TP_PROTO(struct btree_trans *trans,
850                  unsigned long caller_ip),
851         TP_ARGS(trans, caller_ip)
852 );
853
854 DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
855         TP_PROTO(struct btree_trans *trans,
856                  unsigned long caller_ip),
857         TP_ARGS(trans, caller_ip)
858 );
859
860 DECLARE_EVENT_CLASS(transaction_restart_iter,
861         TP_PROTO(struct btree_trans *trans,
862                  unsigned long caller_ip,
863                  struct btree_path *path),
864         TP_ARGS(trans, caller_ip, path),
865
866         TP_STRUCT__entry(
867                 __array(char,                   trans_fn, 32    )
868                 __field(unsigned long,          caller_ip       )
869                 __field(u8,                     btree_id        )
870                 TRACE_BPOS_entries(pos)
871         ),
872
873         TP_fast_assign(
874                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
875                 __entry->caller_ip              = caller_ip;
876                 __entry->btree_id               = path->btree_id;
877                 TRACE_BPOS_assign(pos, path->pos)
878         ),
879
880         TP_printk("%s %pS btree %s pos %llu:%llu:%u",
881                   __entry->trans_fn,
882                   (void *) __entry->caller_ip,
883                   bch2_btree_ids[__entry->btree_id],
884                   __entry->pos_inode,
885                   __entry->pos_offset,
886                   __entry->pos_snapshot)
887 );
888
889 DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_reused,
890         TP_PROTO(struct btree_trans *trans,
891                  unsigned long caller_ip,
892                  struct btree_path *path),
893         TP_ARGS(trans, caller_ip, path)
894 );
895
896 DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_split,
897         TP_PROTO(struct btree_trans *trans,
898                  unsigned long caller_ip,
899                  struct btree_path *path),
900         TP_ARGS(trans, caller_ip, path)
901 );
902
903 TRACE_EVENT(trans_restart_upgrade,
904         TP_PROTO(struct btree_trans *trans,
905                  unsigned long caller_ip,
906                  struct btree_path *path,
907                  unsigned old_locks_want,
908                  unsigned new_locks_want),
909         TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
910
911         TP_STRUCT__entry(
912                 __array(char,                   trans_fn, 32    )
913                 __field(unsigned long,          caller_ip       )
914                 __field(u8,                     btree_id        )
915                 __field(u8,                     old_locks_want  )
916                 __field(u8,                     new_locks_want  )
917                 TRACE_BPOS_entries(pos)
918         ),
919
920         TP_fast_assign(
921                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
922                 __entry->caller_ip              = caller_ip;
923                 __entry->btree_id               = path->btree_id;
924                 __entry->old_locks_want         = old_locks_want;
925                 __entry->new_locks_want         = new_locks_want;
926                 TRACE_BPOS_assign(pos, path->pos)
927         ),
928
929         TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
930                   __entry->trans_fn,
931                   (void *) __entry->caller_ip,
932                   bch2_btree_ids[__entry->btree_id],
933                   __entry->pos_inode,
934                   __entry->pos_offset,
935                   __entry->pos_snapshot,
936                   __entry->old_locks_want,
937                   __entry->new_locks_want)
938 );
939
940 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock,
941         TP_PROTO(struct btree_trans *trans,
942                  unsigned long caller_ip,
943                  struct btree_path *path),
944         TP_ARGS(trans, caller_ip, path)
945 );
946
947 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_next_node,
948         TP_PROTO(struct btree_trans *trans,
949                  unsigned long caller_ip,
950                  struct btree_path *path),
951         TP_ARGS(trans, caller_ip, path)
952 );
953
954 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_parent_for_fill,
955         TP_PROTO(struct btree_trans *trans,
956                  unsigned long caller_ip,
957                  struct btree_path *path),
958         TP_ARGS(trans, caller_ip, path)
959 );
960
961 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_after_fill,
962         TP_PROTO(struct btree_trans *trans,
963                  unsigned long caller_ip,
964                  struct btree_path *path),
965         TP_ARGS(trans, caller_ip, path)
966 );
967
968 DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
969         TP_PROTO(struct btree_trans *trans,
970                  unsigned long caller_ip),
971         TP_ARGS(trans, caller_ip)
972 );
973
974 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_key_cache_fill,
975         TP_PROTO(struct btree_trans *trans,
976                  unsigned long caller_ip,
977                  struct btree_path *path),
978         TP_ARGS(trans, caller_ip, path)
979 );
980
981 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path,
982         TP_PROTO(struct btree_trans *trans,
983                  unsigned long caller_ip,
984                  struct btree_path *path),
985         TP_ARGS(trans, caller_ip, path)
986 );
987
988 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path_intent,
989         TP_PROTO(struct btree_trans *trans,
990                  unsigned long caller_ip,
991                  struct btree_path *path),
992         TP_ARGS(trans, caller_ip, path)
993 );
994
995 DEFINE_EVENT(transaction_restart_iter,  trans_restart_traverse,
996         TP_PROTO(struct btree_trans *trans,
997                  unsigned long caller_ip,
998                  struct btree_path *path),
999         TP_ARGS(trans, caller_ip, path)
1000 );
1001
1002 DEFINE_EVENT(transaction_restart_iter,  trans_restart_memory_allocation_failure,
1003         TP_PROTO(struct btree_trans *trans,
1004                  unsigned long caller_ip,
1005                  struct btree_path *path),
1006         TP_ARGS(trans, caller_ip, path)
1007 );
1008
1009 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
1010         TP_PROTO(struct btree_trans *trans,
1011                  unsigned long caller_ip),
1012         TP_ARGS(trans, caller_ip)
1013 );
1014
1015 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
1016         TP_PROTO(struct btree_trans *trans,
1017                  unsigned long caller_ip),
1018         TP_ARGS(trans, caller_ip)
1019 );
1020
1021 TRACE_EVENT(trans_restart_would_deadlock_write,
1022         TP_PROTO(struct btree_trans *trans),
1023         TP_ARGS(trans),
1024
1025         TP_STRUCT__entry(
1026                 __array(char,                   trans_fn, 32    )
1027         ),
1028
1029         TP_fast_assign(
1030                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1031         ),
1032
1033         TP_printk("%s", __entry->trans_fn)
1034 );
1035
1036 TRACE_EVENT(trans_restart_mem_realloced,
1037         TP_PROTO(struct btree_trans *trans,
1038                  unsigned long caller_ip,
1039                  unsigned long bytes),
1040         TP_ARGS(trans, caller_ip, bytes),
1041
1042         TP_STRUCT__entry(
1043                 __array(char,                   trans_fn, 32    )
1044                 __field(unsigned long,          caller_ip       )
1045                 __field(unsigned long,          bytes           )
1046         ),
1047
1048         TP_fast_assign(
1049                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1050                 __entry->caller_ip      = caller_ip;
1051                 __entry->bytes          = bytes;
1052         ),
1053
1054         TP_printk("%s %pS bytes %lu",
1055                   __entry->trans_fn,
1056                   (void *) __entry->caller_ip,
1057                   __entry->bytes)
1058 );
1059
1060 TRACE_EVENT(trans_restart_key_cache_key_realloced,
1061         TP_PROTO(struct btree_trans *trans,
1062                  unsigned long caller_ip,
1063                  struct btree_path *path,
1064                  unsigned old_u64s,
1065                  unsigned new_u64s),
1066         TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
1067
1068         TP_STRUCT__entry(
1069                 __array(char,                   trans_fn, 32    )
1070                 __field(unsigned long,          caller_ip       )
1071                 __field(enum btree_id,          btree_id        )
1072                 TRACE_BPOS_entries(pos)
1073                 __field(u32,                    old_u64s        )
1074                 __field(u32,                    new_u64s        )
1075         ),
1076
1077         TP_fast_assign(
1078                 strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1079                 __entry->caller_ip              = caller_ip;
1080
1081                 __entry->btree_id       = path->btree_id;
1082                 TRACE_BPOS_assign(pos, path->pos);
1083                 __entry->old_u64s       = old_u64s;
1084                 __entry->new_u64s       = new_u64s;
1085         ),
1086
1087         TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
1088                   __entry->trans_fn,
1089                   (void *) __entry->caller_ip,
1090                   bch2_btree_ids[__entry->btree_id],
1091                   __entry->pos_inode,
1092                   __entry->pos_offset,
1093                   __entry->pos_snapshot,
1094                   __entry->old_u64s,
1095                   __entry->new_u64s)
1096 );
1097
1098 #endif /* _TRACE_BCACHE_H */
1099
1100 /* This part must be outside protection */
1101 #include <trace/define_trace.h>