]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/trace.h
Upload to experimental
[bcachefs-tools-debian] / libbcachefs / trace.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7
8 #include <linux/tracepoint.h>
9
/*
 * Helpers for embedding a struct bpos (inode:offset:snapshot) in a
 * tracepoint record: TRACE_BPOS_entries() declares the three fields with a
 * common prefix, TRACE_BPOS_assign() copies them from a struct bpos value.
 */
#define TRACE_BPOS_entries(name)                                \
        __field(u64,                    name##_inode    )       \
        __field(u64,                    name##_offset   )       \
        __field(u32,                    name##_snapshot )

#define TRACE_BPOS_assign(dst, src)                             \
        __entry->dst##_inode            = (src).inode;          \
        __entry->dst##_offset           = (src).offset;         \
        __entry->dst##_snapshot         = (src).snapshot
19
/* Event class for tracepoints that record a single btree position. */
DECLARE_EVENT_CLASS(bpos,
        TP_PROTO(const struct bpos *p),
        TP_ARGS(p),

        TP_STRUCT__entry(
                TRACE_BPOS_entries(p)
        ),

        TP_fast_assign(
                TRACE_BPOS_assign(p, *p);
        ),

        TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
34
/*
 * Event class for tracepoints that record a pre-formatted bkey string.
 * Note: the filesystem pointer @c is accepted but not recorded.
 */
DECLARE_EVENT_CLASS(bkey,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k),

        TP_STRUCT__entry(
                __string(k,     k                               )
        ),

        TP_fast_assign(
                __assign_str(k, k);
        ),

        TP_printk("%s", __get_str(k))
);
49
/*
 * Event class for tracepoints that record a btree node: device, level,
 * btree id, and the node's key position.
 */
DECLARE_EVENT_CLASS(btree_node,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u8,             level                   )
                __field(u8,             btree_id                )
                TRACE_BPOS_entries(pos)
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->level          = b->c.level;
                __entry->btree_id       = b->c.btree_id;
                TRACE_BPOS_assign(pos, b->key.k.p);
        ),

        TP_printk("%d,%d %u %s %llu:%llu:%u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->level,
                  bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
74
/* Event class for tracepoints that record only the filesystem's device. */
DECLARE_EVENT_CLASS(bch_fs,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
        ),

        TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
89
/*
 * Event class for tracepoints that record a bio: device, starting sector,
 * length in 512-byte sectors, and the rwbs op/flags string.
 */
DECLARE_EVENT_CLASS(bio,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
        ),

        TP_fast_assign(
                /* bi_bdev may be NULL for a bio not yet mapped to a device */
                __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
        ),

        TP_printk("%d,%d  %s %llu + %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector, __entry->nr_sector)
);
112
113 /* super-io.c: */
/* Superblock write, with the caller's instruction pointer for attribution. */
TRACE_EVENT(write_super,
        TP_PROTO(struct bch_fs *c, unsigned long ip),
        TP_ARGS(c, ip),

        TP_STRUCT__entry(
                __field(dev_t,          dev     )
                __field(unsigned long,  ip      )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->ip             = ip;
        ),

        TP_printk("%d,%d for %pS",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (void *) __entry->ip)
);
132
133 /* io.c: */
134
/* A read triggered promotion of its extent to a faster device. */
DEFINE_EVENT(bio, read_promote,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);
139
/* A read was not promoted; records the reason as an error string. */
TRACE_EVENT(read_nopromote,
        TP_PROTO(struct bch_fs *c, int ret),
        TP_ARGS(c, ret),

        TP_STRUCT__entry(
                __field(dev_t,          dev             )
                __array(char,           ret, 32         )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
        ),

        TP_printk("%d,%d ret %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ret)
);
158
/* Read path events: bounce buffering, splitting, retry, and the race where
 * a cached copy was reused while a read was in flight. */
DEFINE_EVENT(bio, read_bounce,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);
178
179 /* Journal */
180
/* Journal out of space / journal entry out of space / journal write issued. */
DEFINE_EVENT(bch_fs, journal_full,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, journal_entry_full,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bio, journal_write,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);
195
/*
 * Journal reclaim pass starting: records whether it was invoked directly or
 * kicked, the flush targets, and dirty/total counts for the btree cache and
 * the btree key cache.
 */
TRACE_EVENT(journal_reclaim_start,
        TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
                 u64 min_nr, u64 min_key_cache,
                 u64 btree_cache_dirty, u64 btree_cache_total,
                 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
        TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
                btree_cache_dirty, btree_cache_total,
                btree_key_cache_dirty, btree_key_cache_total),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(bool,           direct                  )
                __field(bool,           kicked                  )
                __field(u64,            min_nr                  )
                __field(u64,            min_key_cache           )
                __field(u64,            btree_cache_dirty       )
                __field(u64,            btree_cache_total       )
                __field(u64,            btree_key_cache_dirty   )
                __field(u64,            btree_key_cache_total   )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->direct                 = direct;
                __entry->kicked                 = kicked;
                __entry->min_nr                 = min_nr;
                __entry->min_key_cache          = min_key_cache;
                __entry->btree_cache_dirty      = btree_cache_dirty;
                __entry->btree_cache_total      = btree_cache_total;
                __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
                __entry->btree_key_cache_total  = btree_key_cache_total;
        ),

        TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->direct,
                  __entry->kicked,
                  __entry->min_nr,
                  __entry->min_key_cache,
                  __entry->btree_cache_dirty,
                  __entry->btree_cache_total,
                  __entry->btree_key_cache_dirty,
                  __entry->btree_key_cache_total)
);
240
/* Journal reclaim pass finished; records how many entries were flushed. */
TRACE_EVENT(journal_reclaim_finish,
        TP_PROTO(struct bch_fs *c, u64 nr_flushed),
        TP_ARGS(c, nr_flushed),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u64,            nr_flushed              )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->nr_flushed     = nr_flushed;
        ),

        TP_printk("%d,%d flushed %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->nr_flushed)
);
259
260 /* bset.c: */
261
/* A bpos could not be packed into the compressed on-disk key format. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
        TP_PROTO(const struct bpos *p),
        TP_ARGS(p)
);
266
267 /* Btree cache: */
268
/* Btree cache shrinker scan: requested, freeable, and actual counts. */
TRACE_EVENT(btree_cache_scan,
        TP_PROTO(long nr_to_scan, long can_free, long ret),
        TP_ARGS(nr_to_scan, can_free, ret),

        TP_STRUCT__entry(
                __field(long,   nr_to_scan              )
                __field(long,   can_free                )
                __field(long,   ret                     )
        ),

        TP_fast_assign(
                __entry->nr_to_scan     = nr_to_scan;
                __entry->can_free       = can_free;
                __entry->ret            = ret;
        ),

        TP_printk("scanned for %li nodes, can free %li, ret %li",
                  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
288
/* Btree cache events: node reaped by the shrinker, and the cannibalize
 * lock (last-resort node reuse when allocation fails) lifecycle. */
DEFINE_EVENT(btree_node, btree_cache_reap,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);
313
314 /* Btree */
315
/* A btree node was read from disk. */
DEFINE_EVENT(btree_node, btree_node_read,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);
320
/* A btree node write was issued; records node type and write size. */
TRACE_EVENT(btree_node_write,
        TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
        TP_ARGS(b, bytes, sectors),

        TP_STRUCT__entry(
                __field(enum btree_node_type,   type)
                __field(unsigned,       bytes                   )
                __field(unsigned,       sectors                 )
        ),

        TP_fast_assign(
                __entry->type   = btree_node_type(b);
                __entry->bytes  = bytes;
                __entry->sectors = sectors;
        ),

        TP_printk("bkey type %u bytes %u sectors %u",
                  __entry->type , __entry->bytes, __entry->sectors)
);
340
/* Btree node allocated / freed. */
DEFINE_EVENT(btree_node, btree_node_alloc,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);
350
/*
 * Failed to get a btree node reserve: records the transaction function,
 * caller, how many nodes were required, and the error as a string.
 */
TRACE_EVENT(btree_reserve_get_fail,
        TP_PROTO(const char *trans_fn,
                 unsigned long caller_ip,
                 size_t required,
                 int ret),
        TP_ARGS(trans_fn, caller_ip, required, ret),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(size_t,                 required        )
                __array(char,                   ret, 32         )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->required       = required;
                strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
        ),

        TP_printk("%s %pS required %zu ret %s",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  __entry->required,
                  __entry->ret)
);
378
/* Btree topology update events: compact, merge, split, rewrite, new root. */
DEFINE_EVENT(btree_node, btree_node_compact,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);
403
404 TRACE_EVENT(btree_path_relock_fail,
405         TP_PROTO(struct btree_trans *trans,
406                  unsigned long caller_ip,
407                  struct btree_path *path,
408                  unsigned level),
409         TP_ARGS(trans, caller_ip, path, level),
410
411         TP_STRUCT__entry(
412                 __array(char,                   trans_fn, 32    )
413                 __field(unsigned long,          caller_ip       )
414                 __field(u8,                     btree_id        )
415                 __field(u8,                     level           )
416                 TRACE_BPOS_entries(pos)
417                 __array(char,                   node, 24        )
418                 __field(u8,                     self_read_count )
419                 __field(u8,                     self_intent_count)
420                 __field(u8,                     read_count      )
421                 __field(u8,                     intent_count    )
422                 __field(u32,                    iter_lock_seq   )
423                 __field(u32,                    node_lock_seq   )
424         ),
425
426         TP_fast_assign(
427                 struct btree *b = btree_path_node(path, level);
428                 struct six_lock_count c;
429
430                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
431                 __entry->caller_ip              = caller_ip;
432                 __entry->btree_id               = path->btree_id;
433                 __entry->level                  = path->level;
434                 TRACE_BPOS_assign(pos, path->pos);
435
436                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
437                 __entry->self_read_count        = c.n[SIX_LOCK_read];
438                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
439
440                 if (IS_ERR(b)) {
441                         strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
442                 } else {
443                         c = six_lock_counts(&path->l[level].b->c.lock);
444                         __entry->read_count     = c.n[SIX_LOCK_read];
445                         __entry->intent_count   = c.n[SIX_LOCK_intent];
446                         scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
447                 }
448                 __entry->iter_lock_seq          = path->l[level].lock_seq;
449                 __entry->node_lock_seq          = is_btree_node(path, level)
450                         ? six_lock_seq(&path->l[level].b->c.lock)
451                         : 0;
452         ),
453
454         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
455                   __entry->trans_fn,
456                   (void *) __entry->caller_ip,
457                   bch2_btree_id_str(__entry->btree_id),
458                   __entry->pos_inode,
459                   __entry->pos_offset,
460                   __entry->pos_snapshot,
461                   __entry->level,
462                   __entry->node,
463                   __entry->self_read_count,
464                   __entry->self_intent_count,
465                   __entry->read_count,
466                   __entry->intent_count,
467                   __entry->iter_lock_seq,
468                   __entry->node_lock_seq)
469 );
470
471 TRACE_EVENT(btree_path_upgrade_fail,
472         TP_PROTO(struct btree_trans *trans,
473                  unsigned long caller_ip,
474                  struct btree_path *path,
475                  unsigned level),
476         TP_ARGS(trans, caller_ip, path, level),
477
478         TP_STRUCT__entry(
479                 __array(char,                   trans_fn, 32    )
480                 __field(unsigned long,          caller_ip       )
481                 __field(u8,                     btree_id        )
482                 __field(u8,                     level           )
483                 TRACE_BPOS_entries(pos)
484                 __field(u8,                     locked          )
485                 __field(u8,                     self_read_count )
486                 __field(u8,                     self_intent_count)
487                 __field(u8,                     read_count      )
488                 __field(u8,                     intent_count    )
489                 __field(u32,                    iter_lock_seq   )
490                 __field(u32,                    node_lock_seq   )
491         ),
492
493         TP_fast_assign(
494                 struct six_lock_count c;
495
496                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
497                 __entry->caller_ip              = caller_ip;
498                 __entry->btree_id               = path->btree_id;
499                 __entry->level                  = level;
500                 TRACE_BPOS_assign(pos, path->pos);
501                 __entry->locked                 = btree_node_locked(path, level);
502
503                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
504                 __entry->self_read_count        = c.n[SIX_LOCK_read];
505                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
506                 c = six_lock_counts(&path->l[level].b->c.lock);
507                 __entry->read_count             = c.n[SIX_LOCK_read];
508                 __entry->intent_count           = c.n[SIX_LOCK_intent];
509                 __entry->iter_lock_seq          = path->l[level].lock_seq;
510                 __entry->node_lock_seq          = is_btree_node(path, level)
511                         ? six_lock_seq(&path->l[level].b->c.lock)
512                         : 0;
513         ),
514
515         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
516                   __entry->trans_fn,
517                   (void *) __entry->caller_ip,
518                   bch2_btree_id_str(__entry->btree_id),
519                   __entry->pos_inode,
520                   __entry->pos_offset,
521                   __entry->pos_snapshot,
522                   __entry->level,
523                   __entry->locked,
524                   __entry->self_read_count,
525                   __entry->self_intent_count,
526                   __entry->read_count,
527                   __entry->intent_count,
528                   __entry->iter_lock_seq,
529                   __entry->node_lock_seq)
530 );
531
532 /* Garbage collection */
533
/* Bucket generation garbage collection start / end. */
DEFINE_EVENT(bch_fs, gc_gens_start,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);
543
544 /* Allocator */
545
/*
 * Event class for bucket allocation attempts: device, reserve name, bucket,
 * free/available counts, copygc wait state, per-attempt skip statistics from
 * struct bucket_alloc_state, and the result as an error string.
 */
DECLARE_EVENT_CLASS(bucket_alloc,
        TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
                 u64 bucket,
                 u64 free,
                 u64 avail,
                 u64 copygc_wait_amount,
                 s64 copygc_waiting_for,
                 struct bucket_alloc_state *s,
                 bool nonblocking,
                 const char *err),
        TP_ARGS(ca, alloc_reserve, bucket, free, avail,
                copygc_wait_amount, copygc_waiting_for,
                s, nonblocking, err),

        TP_STRUCT__entry(
                __field(u8,                     dev                     )
                __array(char,   reserve,        16                      )
                __field(u64,                    bucket  )
                __field(u64,                    free                    )
                __field(u64,                    avail                   )
                __field(u64,                    copygc_wait_amount      )
                __field(s64,                    copygc_waiting_for      )
                __field(u64,                    seen                    )
                __field(u64,                    open                    )
                __field(u64,                    need_journal_commit     )
                __field(u64,                    nouse                   )
                __field(bool,                   nonblocking             )
                __field(u64,                    nocow                   )
                __array(char,                   err,    32              )
        ),

        TP_fast_assign(
                __entry->dev            = ca->dev_idx;
                strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
                __entry->bucket         = bucket;
                __entry->free           = free;
                __entry->avail          = avail;
                __entry->copygc_wait_amount     = copygc_wait_amount;
                __entry->copygc_waiting_for     = copygc_waiting_for;
                __entry->seen           = s->buckets_seen;
                __entry->open           = s->skipped_open;
                __entry->need_journal_commit = s->skipped_need_journal_commit;
                __entry->nouse          = s->skipped_nouse;
                __entry->nonblocking    = nonblocking;
                __entry->nocow          = s->skipped_nocow;
                strscpy(__entry->err, err, sizeof(__entry->err));
        ),

        TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
                  __entry->reserve,
                  __entry->dev,
                  __entry->bucket,
                  __entry->free,
                  __entry->avail,
                  __entry->copygc_wait_amount,
                  __entry->copygc_waiting_for,
                  __entry->seen,
                  __entry->open,
                  __entry->need_journal_commit,
                  __entry->nouse,
                  __entry->nocow,
                  __entry->nonblocking,
                  __entry->err)
);
610
/* Bucket allocation succeeded / failed. */
DEFINE_EVENT(bucket_alloc, bucket_alloc,
        TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
                 u64 bucket,
                 u64 free,
                 u64 avail,
                 u64 copygc_wait_amount,
                 s64 copygc_waiting_for,
                 struct bucket_alloc_state *s,
                 bool nonblocking,
                 const char *err),
        TP_ARGS(ca, alloc_reserve, bucket, free, avail,
                copygc_wait_amount, copygc_waiting_for,
                s, nonblocking, err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
        TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
                 u64 bucket,
                 u64 free,
                 u64 avail,
                 u64 copygc_wait_amount,
                 s64 copygc_waiting_for,
                 struct bucket_alloc_state *s,
                 bool nonblocking,
                 const char *err),
        TP_ARGS(ca, alloc_reserve, bucket, free, avail,
                copygc_wait_amount, copygc_waiting_for,
                s, nonblocking, err)
);
640
641 TRACE_EVENT(discard_buckets,
642         TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
643                  u64 need_journal_commit, u64 discarded, const char *err),
644         TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
645
646         TP_STRUCT__entry(
647                 __field(dev_t,          dev                     )
648                 __field(u64,            seen                    )
649                 __field(u64,            open                    )
650                 __field(u64,            need_journal_commit     )
651                 __field(u64,            discarded               )
652                 __array(char,           err,    16              )
653         ),
654
655         TP_fast_assign(
656                 __entry->dev                    = c->dev;
657                 __entry->seen                   = seen;
658                 __entry->open                   = open;
659                 __entry->need_journal_commit    = need_journal_commit;
660                 __entry->discarded              = discarded;
661                 strscpy(__entry->err, err, sizeof(__entry->err));
662         ),
663
664         TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
665                   MAJOR(__entry->dev), MINOR(__entry->dev),
666                   __entry->seen,
667                   __entry->open,
668                   __entry->need_journal_commit,
669                   __entry->discarded,
670                   __entry->err)
671 );
672
/* A cached-data bucket was invalidated; records member device and sectors. */
TRACE_EVENT(bucket_invalidate,
        TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
        TP_ARGS(c, dev, bucket, sectors),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u32,            dev_idx                 )
                __field(u32,            sectors                 )
                __field(u64,            bucket                  )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->dev_idx        = dev;
                __entry->sectors        = sectors;
                __entry->bucket         = bucket;
        ),

        TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->dev_idx, __entry->bucket,
                  __entry->sectors)
);
696
697 /* Moving IO */
698
/*
 * Bucket evacuation started. The bucket bpos encodes the member device
 * index in .inode and the bucket number in .offset.
 */
TRACE_EVENT(bucket_evacuate,
        TP_PROTO(struct bch_fs *c, struct bpos *bucket),
        TP_ARGS(c, bucket),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u32,            dev_idx                 )
                __field(u64,            bucket                  )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->dev_idx        = bucket->inode;
                __entry->bucket         = bucket->offset;
        ),

        TP_printk("%d:%d %u:%llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->dev_idx, __entry->bucket)
);
719
/* Extent move lifecycle: queued, read issued, write issued, finished. */
DEFINE_EVENT(bkey, move_extent,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_read,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_write,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_finish,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);
739
/* An extent move failed; records a caller-supplied reason string. */
TRACE_EVENT(move_extent_fail,
        TP_PROTO(struct bch_fs *c, const char *msg),
        TP_ARGS(c, msg),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __string(msg,           msg                     )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __assign_str(msg, msg);
        ),

        TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
);
756
/* Memory allocation failed while setting up an extent move. */
DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);
761
762 TRACE_EVENT(move_data,
763         TP_PROTO(struct bch_fs *c,
764                  struct bch_move_stats *stats),
765         TP_ARGS(c, stats),
766
767         TP_STRUCT__entry(
768                 __field(dev_t,          dev             )
769                 __field(u64,            keys_moved      )
770                 __field(u64,            keys_raced      )
771                 __field(u64,            sectors_seen    )
772                 __field(u64,            sectors_moved   )
773                 __field(u64,            sectors_raced   )
774         ),
775
776         TP_fast_assign(
777                 __entry->dev            = c->dev;
778                 __entry->keys_moved     = atomic64_read(&stats->keys_moved);
779                 __entry->keys_raced     = atomic64_read(&stats->keys_raced);
780                 __entry->sectors_seen   = atomic64_read(&stats->sectors_seen);
781                 __entry->sectors_moved  = atomic64_read(&stats->sectors_moved);
782                 __entry->sectors_raced  = atomic64_read(&stats->sectors_raced);
783         ),
784
785         TP_printk("%d,%d keys moved %llu raced %llu"
786                   "sectors seen %llu moved %llu raced %llu",
787                   MAJOR(__entry->dev), MINOR(__entry->dev),
788                   __entry->keys_moved,
789                   __entry->keys_raced,
790                   __entry->sectors_seen,
791                   __entry->sectors_moved,
792                   __entry->sectors_raced)
793 );
794
/*
 * Bucket evacuation finished: records member/bucket (from the bpos),
 * sectors moved out of the bucket's size, fragmentation, and the result.
 */
TRACE_EVENT(evacuate_bucket,
        TP_PROTO(struct bch_fs *c, struct bpos *bucket,
                 unsigned sectors, unsigned bucket_size,
                 u64 fragmentation, int ret),
        TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

        TP_STRUCT__entry(
                __field(dev_t,          dev             )
                __field(u64,            member          )
                __field(u64,            bucket          )
                __field(u32,            sectors         )
                __field(u32,            bucket_size     )
                __field(u64,            fragmentation   )
                __field(int,            ret             )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->member                 = bucket->inode;
                __entry->bucket                 = bucket->offset;
                __entry->sectors                = sectors;
                __entry->bucket_size            = bucket_size;
                __entry->fragmentation          = fragmentation;
                __entry->ret                    = ret;
        ),

        TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->member, __entry->bucket,
                  __entry->sectors, __entry->bucket_size,
                  __entry->fragmentation, __entry->ret)
);
827
828 TRACE_EVENT(copygc,
829         TP_PROTO(struct bch_fs *c,
830                  u64 sectors_moved, u64 sectors_not_moved,
831                  u64 buckets_moved, u64 buckets_not_moved),
832         TP_ARGS(c,
833                 sectors_moved, sectors_not_moved,
834                 buckets_moved, buckets_not_moved),
835
836         TP_STRUCT__entry(
837                 __field(dev_t,          dev                     )
838                 __field(u64,            sectors_moved           )
839                 __field(u64,            sectors_not_moved       )
840                 __field(u64,            buckets_moved           )
841                 __field(u64,            buckets_not_moved       )
842         ),
843
844         TP_fast_assign(
845                 __entry->dev                    = c->dev;
846                 __entry->sectors_moved          = sectors_moved;
847                 __entry->sectors_not_moved      = sectors_not_moved;
848                 __entry->buckets_moved          = buckets_moved;
849                 __entry->buckets_not_moved = buckets_moved;
850         ),
851
852         TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
853                   MAJOR(__entry->dev), MINOR(__entry->dev),
854                   __entry->sectors_moved, __entry->sectors_not_moved,
855                   __entry->buckets_moved, __entry->buckets_not_moved)
856 );
857
858 TRACE_EVENT(copygc_wait,
859         TP_PROTO(struct bch_fs *c,
860                  u64 wait_amount, u64 until),
861         TP_ARGS(c, wait_amount, until),
862
863         TP_STRUCT__entry(
864                 __field(dev_t,          dev                     )
865                 __field(u64,            wait_amount             )
866                 __field(u64,            until                   )
867         ),
868
869         TP_fast_assign(
870                 __entry->dev            = c->dev;
871                 __entry->wait_amount    = wait_amount;
872                 __entry->until          = until;
873         ),
874
875         TP_printk("%d,%u waiting for %llu sectors until %llu",
876                   MAJOR(__entry->dev), MINOR(__entry->dev),
877                   __entry->wait_amount, __entry->until)
878 );
879
880 /* btree transactions: */
881
/*
 * Common event class for btree-transaction events that only need to record
 * which transaction (by its fn name) hit the event and from where (caller ip).
 */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
899
/* A transaction commit completed. */
DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Transaction restart triggered by fault injection. */
DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
911
/*
 * Transaction restart caused by racing with a split of btree node @b.
 * Records how full the node was (written blocks vs total, and u64s left)
 * to help diagnose why the split happened.
 */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch_btree_keys_u64s_remaining(trans->c, b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
942
/* Transaction blocked waiting on journal reclaim. */
DEFINE_EVENT(transaction_event,	trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
948
/*
 * Transaction restart because a journal pre-reservation could not be
 * obtained; @flags are the journal preres flags that were requested.
 */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
971
/* Restart forced by fault injection. */
DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Transaction fell back to traversing all paths. */
DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart because a key cache operation raced with another thread. */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart because the transaction exceeded its iterator/path budget. */
DEFINE_EVENT(transaction_event,	trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
995
/*
 * Common event class for transaction restarts attributable to a specific
 * btree path: records the transaction fn, caller ip, btree id, and the
 * path's position.
 */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1024
/* Restart because a btree node the path pointed at was freed and reused. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart because a btree node was split underneath this path. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1038
/* Defined in the btree locking code; carries which level failed (f->l) and
 * the node involved (f->b) when a lock upgrade could not be taken. */
struct get_locks_fail;

/*
 * Transaction restart because a path's locks could not be upgraded from
 * @old_locks_want to @new_locks_want; @f describes the level and node where
 * the upgrade failed, whose lock sequence numbers are recorded for debugging.
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		__field(u32,			path_alloc_seq	)
		__field(u32,			downgrade_seq)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		/* f->b may be an error pointer when no node was obtained */
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		__entry->path_alloc_seq		= path->alloc_seq;
		__entry->downgrade_seq		= path->downgrade_seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq,
		  __entry->path_alloc_seq,
		  __entry->downgrade_seq)
);
1093
/* Restart because a previously-held node lock could not be retaken. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Relock failure while advancing to the next btree node. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Relock failure on the parent node needed to fill a child. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Relock failure after a node fill completed. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart because a key cache entry's lock could not be upgraded. */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Relock failure while filling a key cache entry. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Relock failure on a specific path. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Relock failure taking a path's intent lock. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart raised during path traversal. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart after a memory allocation failed mid-transaction. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1162
/* Restart because taking a lock would have deadlocked (cycle detected). */
DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Deadlock-cycle detection hit its recursion limit; restart to be safe. */
DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1174
/*
 * Restart because taking write locks at commit time would deadlock.
 * Only the transaction fn is recorded (no caller ip available here).
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1189
/*
 * Restart because the transaction's preallocated memory was grown to
 * @bytes; the realloc invalidates pointers, so the transaction restarts.
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1213
/*
 * Restart because a cached key's buffer was reallocated (grown from
 * @old_u64s to @new_u64s), invalidating pointers into it.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1251
/*
 * A btree path's locks were downgraded from @old_locks_want to the path's
 * current locks_want; records the btree and position for context.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		/* path->locks_want has already been updated to the new value */
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1287
/* Restart because the btree write buffer had to be flushed mid-transaction. */
DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1293
/*
 * Write buffer flush completed: @nr entries processed out of a buffer of
 * @size, @skipped entries skipped, @fast committed via the fast path.
 * NOTE(review): @trans is accepted but no per-transaction data is recorded
 * from it — confirm whether that's intentional.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1315
/*
 * Write buffer flush fell back to the slow path for @nr of @size entries.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
	TP_ARGS(trans, nr, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu", __entry->nr, __entry->size)
);
1332
1333 #endif /* _TRACE_BCACHEFS_H */
1334
1335 /* This part must be outside protection */
1336 #undef TRACE_INCLUDE_PATH
1337 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1338
1339 #undef TRACE_INCLUDE_FILE
1340 #define TRACE_INCLUDE_FILE trace
1341
1342 #include <trace/define_trace.h>