1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7
8 #include <linux/tracepoint.h>
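/*
 * Each TRACE_EVENT()/DEFINE_EVENT() in this header generates a trace_<name>()
 * function that bcachefs calls at the corresponding event site.
 */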
9
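/* Helpers for embedding a struct bpos (inode:offset:snapshot) in trace entries: */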
10 #define TRACE_BPOS_entries(name)                                \
11         __field(u64,                    name##_inode    )       \
12         __field(u64,                    name##_offset   )       \
13         __field(u32,                    name##_snapshot )
14
15 #define TRACE_BPOS_assign(dst, src)                             \
16         __entry->dst##_inode            = (src).inode;          \
17         __entry->dst##_offset           = (src).offset;         \
18         __entry->dst##_snapshot         = (src).snapshot
19
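/* bpos: events that record a single btree position */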
20 DECLARE_EVENT_CLASS(bpos,
21         TP_PROTO(const struct bpos *p),
22         TP_ARGS(p),
23
24         TP_STRUCT__entry(
25                 TRACE_BPOS_entries(p)
26         ),
27
28         TP_fast_assign(
29                 TRACE_BPOS_assign(p, *p);
30         ),
31
32         TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
33 );
34
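/* bkey: events that record a pre-formatted bkey string */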
35 DECLARE_EVENT_CLASS(bkey,
36         TP_PROTO(struct bch_fs *c, const char *k),
37         TP_ARGS(c, k),
38
39         TP_STRUCT__entry(
40                 __string(k,     k                               )
41         ),
42
43         TP_fast_assign(
44                 __assign_str(k, k);
45         ),
46
47         TP_printk("%s", __get_str(k))
48 );
49
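/* btree_node: events that record device, btree id, level and node position */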
50 DECLARE_EVENT_CLASS(btree_node,
51         TP_PROTO(struct bch_fs *c, struct btree *b),
52         TP_ARGS(c, b),
53
54         TP_STRUCT__entry(
55                 __field(dev_t,          dev                     )
56                 __field(u8,             level                   )
57                 __field(u8,             btree_id                )
58                 TRACE_BPOS_entries(pos)
59         ),
60
61         TP_fast_assign(
62                 __entry->dev            = c->dev;
63                 __entry->level          = b->c.level;
64                 __entry->btree_id       = b->c.btree_id;
65                 TRACE_BPOS_assign(pos, b->key.k.p);
66         ),
67
68         TP_printk("%d,%d %u %s %llu:%llu:%u",
69                   MAJOR(__entry->dev), MINOR(__entry->dev),
70                   __entry->level,
71                   bch2_btree_id_str(__entry->btree_id),
72                   __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
73 );
74
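/* bch_fs: filesystem-wide events that record only the device number */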
75 DECLARE_EVENT_CLASS(bch_fs,
76         TP_PROTO(struct bch_fs *c),
77         TP_ARGS(c),
78
79         TP_STRUCT__entry(
80                 __field(dev_t,          dev                     )
81         ),
82
83         TP_fast_assign(
84                 __entry->dev            = c->dev;
85         ),
86
87         TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
88 );
89
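/* bio: block IO events recording device, starting sector, size and rwbs flags */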
90 DECLARE_EVENT_CLASS(bio,
91         TP_PROTO(struct bio *bio),
92         TP_ARGS(bio),
93
94         TP_STRUCT__entry(
95                 __field(dev_t,          dev                     )
96                 __field(sector_t,       sector                  )
97                 __field(unsigned int,   nr_sector               )
98                 __array(char,           rwbs,   6               )
99         ),
100
101         TP_fast_assign(
102                 __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
103                 __entry->sector         = bio->bi_iter.bi_sector;
104                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
105                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
106         ),
107
108         TP_printk("%d,%d  %s %llu + %u",
109                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
110                   (unsigned long long)__entry->sector, __entry->nr_sector)
111 );
112
113 /* super-io.c: */
114 TRACE_EVENT(write_super,
115         TP_PROTO(struct bch_fs *c, unsigned long ip),
116         TP_ARGS(c, ip),
117
118         TP_STRUCT__entry(
119                 __field(dev_t,          dev     )
120                 __field(unsigned long,  ip      )
121         ),
122
123         TP_fast_assign(
124                 __entry->dev            = c->dev;
125                 __entry->ip             = ip;
126         ),
127
128         TP_printk("%d,%d for %pS",
129                   MAJOR(__entry->dev), MINOR(__entry->dev),
130                   (void *) __entry->ip)
131 );
132
133 /* io.c: */
134
135 DEFINE_EVENT(bio, read_promote,
136         TP_PROTO(struct bio *bio),
137         TP_ARGS(bio)
138 );
139
140 TRACE_EVENT(read_nopromote,
141         TP_PROTO(struct bch_fs *c, int ret),
142         TP_ARGS(c, ret),
143
144         TP_STRUCT__entry(
145                 __field(dev_t,          dev             )
146                 __array(char,           ret, 32         )
147         ),
148
149         TP_fast_assign(
150                 __entry->dev            = c->dev;
151                 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
152         ),
153
154         TP_printk("%d,%d ret %s",
155                   MAJOR(__entry->dev), MINOR(__entry->dev),
156                   __entry->ret)
157 );
158
159 DEFINE_EVENT(bio, read_bounce,
160         TP_PROTO(struct bio *bio),
161         TP_ARGS(bio)
162 );
163
164 DEFINE_EVENT(bio, read_split,
165         TP_PROTO(struct bio *bio),
166         TP_ARGS(bio)
167 );
168
169 DEFINE_EVENT(bio, read_retry,
170         TP_PROTO(struct bio *bio),
171         TP_ARGS(bio)
172 );
173
174 DEFINE_EVENT(bio, read_reuse_race,
175         TP_PROTO(struct bio *bio),
176         TP_ARGS(bio)
177 );
178
179 /* Journal */
180
181 DEFINE_EVENT(bch_fs, journal_full,
182         TP_PROTO(struct bch_fs *c),
183         TP_ARGS(c)
184 );
185
186 DEFINE_EVENT(bch_fs, journal_entry_full,
187         TP_PROTO(struct bch_fs *c),
188         TP_ARGS(c)
189 );
190
191 TRACE_EVENT(journal_entry_close,
192         TP_PROTO(struct bch_fs *c, unsigned bytes),
193         TP_ARGS(c, bytes),
194
195         TP_STRUCT__entry(
196                 __field(dev_t,          dev                     )
197                 __field(u32,            bytes                   )
198         ),
199
200         TP_fast_assign(
201                 __entry->dev                    = c->dev;
202                 __entry->bytes                  = bytes;
203         ),
204
205         TP_printk("%d,%d entry bytes %u",
206                   MAJOR(__entry->dev), MINOR(__entry->dev),
207                   __entry->bytes)
208 );
209
210 DEFINE_EVENT(bio, journal_write,
211         TP_PROTO(struct bio *bio),
212         TP_ARGS(bio)
213 );
214
215 TRACE_EVENT(journal_reclaim_start,
216         TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
217                  u64 min_nr, u64 min_key_cache,
218                  u64 btree_cache_dirty, u64 btree_cache_total,
219                  u64 btree_key_cache_dirty, u64 btree_key_cache_total),
220         TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
221                 btree_cache_dirty, btree_cache_total,
222                 btree_key_cache_dirty, btree_key_cache_total),
223
224         TP_STRUCT__entry(
225                 __field(dev_t,          dev                     )
226                 __field(bool,           direct                  )
227                 __field(bool,           kicked                  )
228                 __field(u64,            min_nr                  )
229                 __field(u64,            min_key_cache           )
230                 __field(u64,            btree_cache_dirty       )
231                 __field(u64,            btree_cache_total       )
232                 __field(u64,            btree_key_cache_dirty   )
233                 __field(u64,            btree_key_cache_total   )
234         ),
235
236         TP_fast_assign(
237                 __entry->dev                    = c->dev;
238                 __entry->direct                 = direct;
239                 __entry->kicked                 = kicked;
240                 __entry->min_nr                 = min_nr;
241                 __entry->min_key_cache          = min_key_cache;
242                 __entry->btree_cache_dirty      = btree_cache_dirty;
243                 __entry->btree_cache_total      = btree_cache_total;
244                 __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
245                 __entry->btree_key_cache_total  = btree_key_cache_total;
246         ),
247
248         TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
249                   MAJOR(__entry->dev), MINOR(__entry->dev),
250                   __entry->direct,
251                   __entry->kicked,
252                   __entry->min_nr,
253                   __entry->min_key_cache,
254                   __entry->btree_cache_dirty,
255                   __entry->btree_cache_total,
256                   __entry->btree_key_cache_dirty,
257                   __entry->btree_key_cache_total)
258 );
259
260 TRACE_EVENT(journal_reclaim_finish,
261         TP_PROTO(struct bch_fs *c, u64 nr_flushed),
262         TP_ARGS(c, nr_flushed),
263
264         TP_STRUCT__entry(
265                 __field(dev_t,          dev                     )
266                 __field(u64,            nr_flushed              )
267         ),
268
269         TP_fast_assign(
270                 __entry->dev            = c->dev;
271                 __entry->nr_flushed     = nr_flushed;
272         ),
273
274         TP_printk("%d,%d flushed %llu",
275                   MAJOR(__entry->dev), MINOR(__entry->dev),
276                   __entry->nr_flushed)
277 );
278
279 /* bset.c: */
280
281 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
282         TP_PROTO(const struct bpos *p),
283         TP_ARGS(p)
284 );
285
286 /* Btree cache: */
287
288 TRACE_EVENT(btree_cache_scan,
289         TP_PROTO(long nr_to_scan, long can_free, long ret),
290         TP_ARGS(nr_to_scan, can_free, ret),
291
292         TP_STRUCT__entry(
293                 __field(long,   nr_to_scan              )
294                 __field(long,   can_free                )
295                 __field(long,   ret                     )
296         ),
297
298         TP_fast_assign(
299                 __entry->nr_to_scan     = nr_to_scan;
300                 __entry->can_free       = can_free;
301                 __entry->ret            = ret;
302         ),
303
304         TP_printk("scanned for %li nodes, can free %li, ret %li",
305                   __entry->nr_to_scan, __entry->can_free, __entry->ret)
306 );
307
308 DEFINE_EVENT(btree_node, btree_cache_reap,
309         TP_PROTO(struct bch_fs *c, struct btree *b),
310         TP_ARGS(c, b)
311 );
312
313 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
314         TP_PROTO(struct bch_fs *c),
315         TP_ARGS(c)
316 );
317
318 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
319         TP_PROTO(struct bch_fs *c),
320         TP_ARGS(c)
321 );
322
323 DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
324         TP_PROTO(struct bch_fs *c),
325         TP_ARGS(c)
326 );
327
328 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
329         TP_PROTO(struct bch_fs *c),
330         TP_ARGS(c)
331 );
332
333 /* Btree */
334
335 DEFINE_EVENT(btree_node, btree_node_read,
336         TP_PROTO(struct bch_fs *c, struct btree *b),
337         TP_ARGS(c, b)
338 );
339
340 TRACE_EVENT(btree_node_write,
341         TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
342         TP_ARGS(b, bytes, sectors),
343
344         TP_STRUCT__entry(
345                 __field(enum btree_node_type,   type)
346                 __field(unsigned,       bytes                   )
347                 __field(unsigned,       sectors                 )
348         ),
349
350         TP_fast_assign(
351                 __entry->type   = btree_node_type(b);
352                 __entry->bytes  = bytes;
353                 __entry->sectors = sectors;
354         ),
355
356         TP_printk("bkey type %u bytes %u sectors %u",
357                   __entry->type, __entry->bytes, __entry->sectors)
358 );
359
360 DEFINE_EVENT(btree_node, btree_node_alloc,
361         TP_PROTO(struct bch_fs *c, struct btree *b),
362         TP_ARGS(c, b)
363 );
364
365 DEFINE_EVENT(btree_node, btree_node_free,
366         TP_PROTO(struct bch_fs *c, struct btree *b),
367         TP_ARGS(c, b)
368 );
369
370 TRACE_EVENT(btree_reserve_get_fail,
371         TP_PROTO(const char *trans_fn,
372                  unsigned long caller_ip,
373                  size_t required,
374                  int ret),
375         TP_ARGS(trans_fn, caller_ip, required, ret),
376
377         TP_STRUCT__entry(
378                 __array(char,                   trans_fn, 32    )
379                 __field(unsigned long,          caller_ip       )
380                 __field(size_t,                 required        )
381                 __array(char,                   ret, 32         )
382         ),
383
384         TP_fast_assign(
385                 strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
386                 __entry->caller_ip      = caller_ip;
387                 __entry->required       = required;
388                 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
389         ),
390
391         TP_printk("%s %pS required %zu ret %s",
392                   __entry->trans_fn,
393                   (void *) __entry->caller_ip,
394                   __entry->required,
395                   __entry->ret)
396 );
397
398 DEFINE_EVENT(btree_node, btree_node_compact,
399         TP_PROTO(struct bch_fs *c, struct btree *b),
400         TP_ARGS(c, b)
401 );
402
403 DEFINE_EVENT(btree_node, btree_node_merge,
404         TP_PROTO(struct bch_fs *c, struct btree *b),
405         TP_ARGS(c, b)
406 );
407
408 DEFINE_EVENT(btree_node, btree_node_split,
409         TP_PROTO(struct bch_fs *c, struct btree *b),
410         TP_ARGS(c, b)
411 );
412
413 DEFINE_EVENT(btree_node, btree_node_rewrite,
414         TP_PROTO(struct bch_fs *c, struct btree *b),
415         TP_ARGS(c, b)
416 );
417
418 DEFINE_EVENT(btree_node, btree_node_set_root,
419         TP_PROTO(struct bch_fs *c, struct btree *b),
420         TP_ARGS(c, b)
421 );
422
423 TRACE_EVENT(btree_path_relock_fail,
424         TP_PROTO(struct btree_trans *trans,
425                  unsigned long caller_ip,
426                  struct btree_path *path,
427                  unsigned level),
428         TP_ARGS(trans, caller_ip, path, level),
429
430         TP_STRUCT__entry(
431                 __array(char,                   trans_fn, 32    )
432                 __field(unsigned long,          caller_ip       )
433                 __field(u8,                     btree_id        )
434                 __field(u8,                     level           )
435                 TRACE_BPOS_entries(pos)
436                 __array(char,                   node, 24        )
437                 __field(u8,                     self_read_count )
438                 __field(u8,                     self_intent_count)
439                 __field(u8,                     read_count      )
440                 __field(u8,                     intent_count    )
441                 __field(u32,                    iter_lock_seq   )
442                 __field(u32,                    node_lock_seq   )
443         ),
444
445         TP_fast_assign(
446                 struct btree *b = btree_path_node(path, level);
447                 struct six_lock_count c;
448
449                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
450                 __entry->caller_ip              = caller_ip;
451                 __entry->btree_id               = path->btree_id;
452                 __entry->level                  = path->level;
453                 TRACE_BPOS_assign(pos, path->pos);
454
455                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
456                 __entry->self_read_count        = c.n[SIX_LOCK_read];
457                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
458
459                 if (IS_ERR(b)) {
460                         strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
461                 } else {
462                         c = six_lock_counts(&path->l[level].b->c.lock);
463                         __entry->read_count     = c.n[SIX_LOCK_read];
464                         __entry->intent_count   = c.n[SIX_LOCK_intent];
465                         scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
466                 }
467                 __entry->iter_lock_seq          = path->l[level].lock_seq;
468                 __entry->node_lock_seq          = is_btree_node(path, level)
469                         ? six_lock_seq(&path->l[level].b->c.lock)
470                         : 0;
471         ),
472
473         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
474                   __entry->trans_fn,
475                   (void *) __entry->caller_ip,
476                   bch2_btree_id_str(__entry->btree_id),
477                   __entry->pos_inode,
478                   __entry->pos_offset,
479                   __entry->pos_snapshot,
480                   __entry->level,
481                   __entry->node,
482                   __entry->self_read_count,
483                   __entry->self_intent_count,
484                   __entry->read_count,
485                   __entry->intent_count,
486                   __entry->iter_lock_seq,
487                   __entry->node_lock_seq)
488 );
489
490 TRACE_EVENT(btree_path_upgrade_fail,
491         TP_PROTO(struct btree_trans *trans,
492                  unsigned long caller_ip,
493                  struct btree_path *path,
494                  unsigned level),
495         TP_ARGS(trans, caller_ip, path, level),
496
497         TP_STRUCT__entry(
498                 __array(char,                   trans_fn, 32    )
499                 __field(unsigned long,          caller_ip       )
500                 __field(u8,                     btree_id        )
501                 __field(u8,                     level           )
502                 TRACE_BPOS_entries(pos)
503                 __field(u8,                     locked          )
504                 __field(u8,                     self_read_count )
505                 __field(u8,                     self_intent_count)
506                 __field(u8,                     read_count      )
507                 __field(u8,                     intent_count    )
508                 __field(u32,                    iter_lock_seq   )
509                 __field(u32,                    node_lock_seq   )
510         ),
511
512         TP_fast_assign(
513                 struct six_lock_count c;
514
515                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
516                 __entry->caller_ip              = caller_ip;
517                 __entry->btree_id               = path->btree_id;
518                 __entry->level                  = level;
519                 TRACE_BPOS_assign(pos, path->pos);
520                 __entry->locked                 = btree_node_locked(path, level);
521
522                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
523                 __entry->self_read_count        = c.n[SIX_LOCK_read];
524                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
525                 c = six_lock_counts(&path->l[level].b->c.lock);
526                 __entry->read_count             = c.n[SIX_LOCK_read];
527                 __entry->intent_count           = c.n[SIX_LOCK_intent];
528                 __entry->iter_lock_seq          = path->l[level].lock_seq;
529                 __entry->node_lock_seq          = is_btree_node(path, level)
530                         ? six_lock_seq(&path->l[level].b->c.lock)
531                         : 0;
532         ),
533
534         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
535                   __entry->trans_fn,
536                   (void *) __entry->caller_ip,
537                   bch2_btree_id_str(__entry->btree_id),
538                   __entry->pos_inode,
539                   __entry->pos_offset,
540                   __entry->pos_snapshot,
541                   __entry->level,
542                   __entry->locked,
543                   __entry->self_read_count,
544                   __entry->self_intent_count,
545                   __entry->read_count,
546                   __entry->intent_count,
547                   __entry->iter_lock_seq,
548                   __entry->node_lock_seq)
549 );
550
551 /* Garbage collection */
552
553 DEFINE_EVENT(bch_fs, gc_gens_start,
554         TP_PROTO(struct bch_fs *c),
555         TP_ARGS(c)
556 );
557
558 DEFINE_EVENT(bch_fs, gc_gens_end,
559         TP_PROTO(struct bch_fs *c),
560         TP_ARGS(c)
561 );
562
563 /* Allocator */
564
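/* bucket_alloc: shared layout for bucket allocation success/failure events */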
565 DECLARE_EVENT_CLASS(bucket_alloc,
566         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
567                  u64 bucket,
568                  u64 free,
569                  u64 avail,
570                  u64 copygc_wait_amount,
571                  s64 copygc_waiting_for,
572                  struct bucket_alloc_state *s,
573                  bool nonblocking,
574                  const char *err),
575         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
576                 copygc_wait_amount, copygc_waiting_for,
577                 s, nonblocking, err),
578
579         TP_STRUCT__entry(
580                 __field(u8,                     dev                     )
581                 __array(char,   reserve,        16                      )
582                 __field(u64,                    bucket  )
583                 __field(u64,                    free                    )
584                 __field(u64,                    avail                   )
585                 __field(u64,                    copygc_wait_amount      )
586                 __field(s64,                    copygc_waiting_for      )
587                 __field(u64,                    seen                    )
588                 __field(u64,                    open                    )
589                 __field(u64,                    need_journal_commit     )
590                 __field(u64,                    nouse                   )
591                 __field(bool,                   nonblocking             )
592                 __field(u64,                    nocow                   )
593                 __array(char,                   err,    32              )
594         ),
595
596         TP_fast_assign(
597                 __entry->dev            = ca->dev_idx;
598                 strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
599                 __entry->bucket         = bucket;
600                 __entry->free           = free;
601                 __entry->avail          = avail;
602                 __entry->copygc_wait_amount     = copygc_wait_amount;
603                 __entry->copygc_waiting_for     = copygc_waiting_for;
604                 __entry->seen           = s->buckets_seen;
605                 __entry->open           = s->skipped_open;
606                 __entry->need_journal_commit = s->skipped_need_journal_commit;
607                 __entry->nouse          = s->skipped_nouse;
608                 __entry->nonblocking    = nonblocking;
609                 __entry->nocow          = s->skipped_nocow;
610                 strscpy(__entry->err, err, sizeof(__entry->err));
611         ),
612
613         TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
614                   __entry->reserve,
615                   __entry->dev,
616                   __entry->bucket,
617                   __entry->free,
618                   __entry->avail,
619                   __entry->copygc_wait_amount,
620                   __entry->copygc_waiting_for,
621                   __entry->seen,
622                   __entry->open,
623                   __entry->need_journal_commit,
624                   __entry->nouse,
625                   __entry->nocow,
626                   __entry->nonblocking,
627                   __entry->err)
628 );
629
630 DEFINE_EVENT(bucket_alloc, bucket_alloc,
631         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
632                  u64 bucket,
633                  u64 free,
634                  u64 avail,
635                  u64 copygc_wait_amount,
636                  s64 copygc_waiting_for,
637                  struct bucket_alloc_state *s,
638                  bool nonblocking,
639                  const char *err),
640         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
641                 copygc_wait_amount, copygc_waiting_for,
642                 s, nonblocking, err)
643 );
644
645 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
646         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
647                  u64 bucket,
648                  u64 free,
649                  u64 avail,
650                  u64 copygc_wait_amount,
651                  s64 copygc_waiting_for,
652                  struct bucket_alloc_state *s,
653                  bool nonblocking,
654                  const char *err),
655         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
656                 copygc_wait_amount, copygc_waiting_for,
657                 s, nonblocking, err)
658 );
659
660 TRACE_EVENT(discard_buckets,
661         TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
662                  u64 need_journal_commit, u64 discarded, const char *err),
663         TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
664
665         TP_STRUCT__entry(
666                 __field(dev_t,          dev                     )
667                 __field(u64,            seen                    )
668                 __field(u64,            open                    )
669                 __field(u64,            need_journal_commit     )
670                 __field(u64,            discarded               )
671                 __array(char,           err,    16              )
672         ),
673
674         TP_fast_assign(
675                 __entry->dev                    = c->dev;
676                 __entry->seen                   = seen;
677                 __entry->open                   = open;
678                 __entry->need_journal_commit    = need_journal_commit;
679                 __entry->discarded              = discarded;
680                 strscpy(__entry->err, err, sizeof(__entry->err));
681         ),
682
683         TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
684                   MAJOR(__entry->dev), MINOR(__entry->dev),
685                   __entry->seen,
686                   __entry->open,
687                   __entry->need_journal_commit,
688                   __entry->discarded,
689                   __entry->err)
690 );
691
692 TRACE_EVENT(bucket_invalidate,
693         TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
694         TP_ARGS(c, dev, bucket, sectors),
695
696         TP_STRUCT__entry(
697                 __field(dev_t,          dev                     )
698                 __field(u32,            dev_idx                 )
699                 __field(u32,            sectors                 )
700                 __field(u64,            bucket                  )
701         ),
702
703         TP_fast_assign(
704                 __entry->dev            = c->dev;
705                 __entry->dev_idx        = dev;
706                 __entry->sectors        = sectors;
707                 __entry->bucket         = bucket;
708         ),
709
710         TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
711                   MAJOR(__entry->dev), MINOR(__entry->dev),
712                   __entry->dev_idx, __entry->bucket,
713                   __entry->sectors)
714 );
715
716 /* Moving IO */
717
718 TRACE_EVENT(bucket_evacuate,
719         TP_PROTO(struct bch_fs *c, struct bpos *bucket),
720         TP_ARGS(c, bucket),
721
722         TP_STRUCT__entry(
723                 __field(dev_t,          dev                     )
724                 __field(u32,            dev_idx                 )
725                 __field(u64,            bucket                  )
726         ),
727
728         TP_fast_assign(
729                 __entry->dev            = c->dev;
730                 __entry->dev_idx        = bucket->inode;
731                 __entry->bucket         = bucket->offset;
732         ),
733
734         TP_printk("%d:%d %u:%llu",
735                   MAJOR(__entry->dev), MINOR(__entry->dev),
736                   __entry->dev_idx, __entry->bucket)
737 );
738
739 DEFINE_EVENT(bkey, move_extent,
740         TP_PROTO(struct bch_fs *c, const char *k),
741         TP_ARGS(c, k)
742 );
743
744 DEFINE_EVENT(bkey, move_extent_read,
745         TP_PROTO(struct bch_fs *c, const char *k),
746         TP_ARGS(c, k)
747 );
748
749 DEFINE_EVENT(bkey, move_extent_write,
750         TP_PROTO(struct bch_fs *c, const char *k),
751         TP_ARGS(c, k)
752 );
753
754 DEFINE_EVENT(bkey, move_extent_finish,
755         TP_PROTO(struct bch_fs *c, const char *k),
756         TP_ARGS(c, k)
757 );
758
759 TRACE_EVENT(move_extent_fail,
760         TP_PROTO(struct bch_fs *c, const char *msg),
761         TP_ARGS(c, msg),
762
763         TP_STRUCT__entry(
764                 __field(dev_t,          dev                     )
765                 __string(msg,           msg                     )
766         ),
767
768         TP_fast_assign(
769                 __entry->dev            = c->dev;
770                 __assign_str(msg, msg);
771         ),
772
773         TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
774 );
775
776 DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
777         TP_PROTO(struct bch_fs *c, const char *k),
778         TP_ARGS(c, k)
779 );
780
781 TRACE_EVENT(move_data,
782         TP_PROTO(struct bch_fs *c,
783                  struct bch_move_stats *stats),
784         TP_ARGS(c, stats),
785
786         TP_STRUCT__entry(
787                 __field(dev_t,          dev             )
788                 __field(u64,            keys_moved      )
789                 __field(u64,            keys_raced      )
790                 __field(u64,            sectors_seen    )
791                 __field(u64,            sectors_moved   )
792                 __field(u64,            sectors_raced   )
793         ),
794
795         TP_fast_assign(
796                 __entry->dev            = c->dev;
797                 __entry->keys_moved     = atomic64_read(&stats->keys_moved);
798                 __entry->keys_raced     = atomic64_read(&stats->keys_raced);
799                 __entry->sectors_seen   = atomic64_read(&stats->sectors_seen);
800                 __entry->sectors_moved  = atomic64_read(&stats->sectors_moved);
801                 __entry->sectors_raced  = atomic64_read(&stats->sectors_raced);
802         ),
803
804         TP_printk("%d,%d keys moved %llu raced %llu "
805                   "sectors seen %llu moved %llu raced %llu",
806                   MAJOR(__entry->dev), MINOR(__entry->dev),
807                   __entry->keys_moved,
808                   __entry->keys_raced,
809                   __entry->sectors_seen,
810                   __entry->sectors_moved,
811                   __entry->sectors_raced)
812 );
813
814 TRACE_EVENT(evacuate_bucket,
815         TP_PROTO(struct bch_fs *c, struct bpos *bucket,
816                  unsigned sectors, unsigned bucket_size,
817                  u64 fragmentation, int ret),
818         TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),
819
820         TP_STRUCT__entry(
821                 __field(dev_t,          dev             )
822                 __field(u64,            member          )
823                 __field(u64,            bucket          )
824                 __field(u32,            sectors         )
825                 __field(u32,            bucket_size     )
826                 __field(u64,            fragmentation   )
827                 __field(int,            ret             )
828         ),
829
830         TP_fast_assign(
831                 __entry->dev                    = c->dev;
832                 __entry->member                 = bucket->inode;
833                 __entry->bucket                 = bucket->offset;
834                 __entry->sectors                = sectors;
835                 __entry->bucket_size            = bucket_size;
836                 __entry->fragmentation          = fragmentation;
837                 __entry->ret                    = ret;
838         ),
839
840         TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
841                   MAJOR(__entry->dev), MINOR(__entry->dev),
842                   __entry->member, __entry->bucket,
843                   __entry->sectors, __entry->bucket_size,
844                   __entry->fragmentation, __entry->ret)
845 );
846
847 TRACE_EVENT(copygc,
848         TP_PROTO(struct bch_fs *c,
849                  u64 sectors_moved, u64 sectors_not_moved,
850                  u64 buckets_moved, u64 buckets_not_moved),
851         TP_ARGS(c,
852                 sectors_moved, sectors_not_moved,
853                 buckets_moved, buckets_not_moved),
854
855         TP_STRUCT__entry(
856                 __field(dev_t,          dev                     )
857                 __field(u64,            sectors_moved           )
858                 __field(u64,            sectors_not_moved       )
859                 __field(u64,            buckets_moved           )
860                 __field(u64,            buckets_not_moved       )
861         ),
862
863         TP_fast_assign(
864                 __entry->dev                    = c->dev;
865                 __entry->sectors_moved          = sectors_moved;
866                 __entry->sectors_not_moved      = sectors_not_moved;
867                 __entry->buckets_moved          = buckets_moved;
868                 __entry->buckets_not_moved      = buckets_not_moved;
869         ),
870
871         TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
872                   MAJOR(__entry->dev), MINOR(__entry->dev),
873                   __entry->sectors_moved, __entry->sectors_not_moved,
874                   __entry->buckets_moved, __entry->buckets_not_moved)
875 );
876
877 TRACE_EVENT(copygc_wait,
878         TP_PROTO(struct bch_fs *c,
879                  u64 wait_amount, u64 until),
880         TP_ARGS(c, wait_amount, until),
881
882         TP_STRUCT__entry(
883                 __field(dev_t,          dev                     )
884                 __field(u64,            wait_amount             )
885                 __field(u64,            until                   )
886         ),
887
888         TP_fast_assign(
889                 __entry->dev            = c->dev;
890                 __entry->wait_amount    = wait_amount;
891                 __entry->until          = until;
892         ),
893
894         TP_printk("%d,%d waiting for %llu sectors until %llu",
895                   MAJOR(__entry->dev), MINOR(__entry->dev),
896                   __entry->wait_amount, __entry->until)
897 );
898
899 /* btree transactions: */
900
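/* transaction_event: events that need only the transaction fn and caller ip */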
901 DECLARE_EVENT_CLASS(transaction_event,
902         TP_PROTO(struct btree_trans *trans,
903                  unsigned long caller_ip),
904         TP_ARGS(trans, caller_ip),
905
906         TP_STRUCT__entry(
907                 __array(char,                   trans_fn, 32    )
908                 __field(unsigned long,          caller_ip       )
909         ),
910
911         TP_fast_assign(
912                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
913                 __entry->caller_ip              = caller_ip;
914         ),
915
916         TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
917 );
918
919 DEFINE_EVENT(transaction_event, transaction_commit,
920         TP_PROTO(struct btree_trans *trans,
921                  unsigned long caller_ip),
922         TP_ARGS(trans, caller_ip)
923 );
924
925 DEFINE_EVENT(transaction_event, trans_restart_injected,
926         TP_PROTO(struct btree_trans *trans,
927                  unsigned long caller_ip),
928         TP_ARGS(trans, caller_ip)
929 );
930
931 TRACE_EVENT(trans_restart_split_race,
932         TP_PROTO(struct btree_trans *trans,
933                  unsigned long caller_ip,
934                  struct btree *b),
935         TP_ARGS(trans, caller_ip, b),
936
937         TP_STRUCT__entry(
938                 __array(char,                   trans_fn, 32    )
939                 __field(unsigned long,          caller_ip       )
940                 __field(u8,                     level           )
941                 __field(u16,                    written         )
942                 __field(u16,                    blocks          )
943                 __field(u16,                    u64s_remaining  )
944         ),
945
946         TP_fast_assign(
947                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
948                 __entry->caller_ip              = caller_ip;
949                 __entry->level          = b->c.level;
950                 __entry->written        = b->written;
951                 __entry->blocks         = btree_blocks(trans->c);
952                 __entry->u64s_remaining = bch_btree_keys_u64s_remaining(trans->c, b);
953         ),
954
955         TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
956                   __entry->trans_fn, (void *) __entry->caller_ip,
957                   __entry->level,
958                   __entry->written, __entry->blocks,
959                   __entry->u64s_remaining)
960 );
961
962 DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
963         TP_PROTO(struct btree_trans *trans,
964                  unsigned long caller_ip),
965         TP_ARGS(trans, caller_ip)
966 );
967
968 TRACE_EVENT(trans_restart_journal_preres_get,
969         TP_PROTO(struct btree_trans *trans,
970                  unsigned long caller_ip,
971                  unsigned flags),
972         TP_ARGS(trans, caller_ip, flags),
973
974         TP_STRUCT__entry(
975                 __array(char,                   trans_fn, 32    )
976                 __field(unsigned long,          caller_ip       )
977                 __field(unsigned,               flags           )
978         ),
979
980         TP_fast_assign(
981                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
982                 __entry->caller_ip              = caller_ip;
983                 __entry->flags                  = flags;
984         ),
985
986         TP_printk("%s %pS %x", __entry->trans_fn,
987                   (void *) __entry->caller_ip,
988                   __entry->flags)
989 );
990
991 DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
992         TP_PROTO(struct btree_trans *trans,
993                  unsigned long caller_ip),
994         TP_ARGS(trans, caller_ip)
995 );
996
997 DEFINE_EVENT(transaction_event, trans_traverse_all,
998         TP_PROTO(struct btree_trans *trans,
999                  unsigned long caller_ip),
1000         TP_ARGS(trans, caller_ip)
1001 );
1002
1003 DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
1004         TP_PROTO(struct btree_trans *trans,
1005                  unsigned long caller_ip),
1006         TP_ARGS(trans, caller_ip)
1007 );
1008
1009 DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
1010         TP_PROTO(struct btree_trans *trans,
1011                  unsigned long caller_ip),
1012         TP_ARGS(trans, caller_ip)
1013 );
1014
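/* transaction_restart_iter: restarts that record the btree and position of the path involved */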
1015 DECLARE_EVENT_CLASS(transaction_restart_iter,
1016         TP_PROTO(struct btree_trans *trans,
1017                  unsigned long caller_ip,
1018                  struct btree_path *path),
1019         TP_ARGS(trans, caller_ip, path),
1020
1021         TP_STRUCT__entry(
1022                 __array(char,                   trans_fn, 32    )
1023                 __field(unsigned long,          caller_ip       )
1024                 __field(u8,                     btree_id        )
1025                 TRACE_BPOS_entries(pos)
1026         ),
1027
1028         TP_fast_assign(
1029                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1030                 __entry->caller_ip              = caller_ip;
1031                 __entry->btree_id               = path->btree_id;
1032                 TRACE_BPOS_assign(pos, path->pos)
1033         ),
1034
1035         TP_printk("%s %pS btree %s pos %llu:%llu:%u",
1036                   __entry->trans_fn,
1037                   (void *) __entry->caller_ip,
1038                   bch2_btree_id_str(__entry->btree_id),
1039                   __entry->pos_inode,
1040                   __entry->pos_offset,
1041                   __entry->pos_snapshot)
1042 );
1043
1044 DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_reused,
1045         TP_PROTO(struct btree_trans *trans,
1046                  unsigned long caller_ip,
1047                  struct btree_path *path),
1048         TP_ARGS(trans, caller_ip, path)
1049 );
1050
1051 DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_split,
1052         TP_PROTO(struct btree_trans *trans,
1053                  unsigned long caller_ip,
1054                  struct btree_path *path),
1055         TP_ARGS(trans, caller_ip, path)
1056 );
1057
1058 struct get_locks_fail;
1059
1060 TRACE_EVENT(trans_restart_upgrade,
1061         TP_PROTO(struct btree_trans *trans,
1062                  unsigned long caller_ip,
1063                  struct btree_path *path,
1064                  unsigned old_locks_want,
1065                  unsigned new_locks_want,
1066                  struct get_locks_fail *f),
1067         TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),
1068
1069         TP_STRUCT__entry(
1070                 __array(char,                   trans_fn, 32    )
1071                 __field(unsigned long,          caller_ip       )
1072                 __field(u8,                     btree_id        )
1073                 __field(u8,                     old_locks_want  )
1074                 __field(u8,                     new_locks_want  )
1075                 __field(u8,                     level           )
1076                 __field(u32,                    path_seq        )
1077                 __field(u32,                    node_seq        )
1078                 __field(u32,                    path_alloc_seq  )
1079                 __field(u32,                    downgrade_seq   )
1080                 TRACE_BPOS_entries(pos)
1081         ),
1082
1083         TP_fast_assign(
1084                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1085                 __entry->caller_ip              = caller_ip;
1086                 __entry->btree_id               = path->btree_id;
1087                 __entry->old_locks_want         = old_locks_want;
1088                 __entry->new_locks_want         = new_locks_want;
1089                 __entry->level                  = f->l;
1090                 __entry->path_seq               = path->l[f->l].lock_seq;
1091                 __entry->node_seq               = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
1092                 __entry->path_alloc_seq         = path->alloc_seq;
1093                 __entry->downgrade_seq          = path->downgrade_seq;
1094                 TRACE_BPOS_assign(pos, path->pos)
1095         ),
1096
1097         TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
1098                   __entry->trans_fn,
1099                   (void *) __entry->caller_ip,
1100                   bch2_btree_id_str(__entry->btree_id),
1101                   __entry->pos_inode,
1102                   __entry->pos_offset,
1103                   __entry->pos_snapshot,
1104                   __entry->old_locks_want,
1105                   __entry->new_locks_want,
1106                   __entry->level,
1107                   __entry->path_seq,
1108                   __entry->node_seq,
1109                   __entry->path_alloc_seq,
1110                   __entry->downgrade_seq)
1111 );
1112
1113 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock,
1114         TP_PROTO(struct btree_trans *trans,
1115                  unsigned long caller_ip,
1116                  struct btree_path *path),
1117         TP_ARGS(trans, caller_ip, path)
1118 );
1119
1120 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_next_node,
1121         TP_PROTO(struct btree_trans *trans,
1122                  unsigned long caller_ip,
1123                  struct btree_path *path),
1124         TP_ARGS(trans, caller_ip, path)
1125 );
1126
1127 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_parent_for_fill,
1128         TP_PROTO(struct btree_trans *trans,
1129                  unsigned long caller_ip,
1130                  struct btree_path *path),
1131         TP_ARGS(trans, caller_ip, path)
1132 );
1133
1134 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_after_fill,
1135         TP_PROTO(struct btree_trans *trans,
1136                  unsigned long caller_ip,
1137                  struct btree_path *path),
1138         TP_ARGS(trans, caller_ip, path)
1139 );
1140
1141 DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
1142         TP_PROTO(struct btree_trans *trans,
1143                  unsigned long caller_ip),
1144         TP_ARGS(trans, caller_ip)
1145 );
1146
1147 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_key_cache_fill,
1148         TP_PROTO(struct btree_trans *trans,
1149                  unsigned long caller_ip,
1150                  struct btree_path *path),
1151         TP_ARGS(trans, caller_ip, path)
1152 );
1153
1154 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path,
1155         TP_PROTO(struct btree_trans *trans,
1156                  unsigned long caller_ip,
1157                  struct btree_path *path),
1158         TP_ARGS(trans, caller_ip, path)
1159 );
1160
1161 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path_intent,
1162         TP_PROTO(struct btree_trans *trans,
1163                  unsigned long caller_ip,
1164                  struct btree_path *path),
1165         TP_ARGS(trans, caller_ip, path)
1166 );
1167
1168 DEFINE_EVENT(transaction_restart_iter,  trans_restart_traverse,
1169         TP_PROTO(struct btree_trans *trans,
1170                  unsigned long caller_ip,
1171                  struct btree_path *path),
1172         TP_ARGS(trans, caller_ip, path)
1173 );
1174
1175 DEFINE_EVENT(transaction_restart_iter,  trans_restart_memory_allocation_failure,
1176         TP_PROTO(struct btree_trans *trans,
1177                  unsigned long caller_ip,
1178                  struct btree_path *path),
1179         TP_ARGS(trans, caller_ip, path)
1180 );
1181
1182 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
1183         TP_PROTO(struct btree_trans *trans,
1184                  unsigned long caller_ip),
1185         TP_ARGS(trans, caller_ip)
1186 );
1187
1188 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
1189         TP_PROTO(struct btree_trans *trans,
1190                  unsigned long caller_ip),
1191         TP_ARGS(trans, caller_ip)
1192 );
1193
1194 TRACE_EVENT(trans_restart_would_deadlock_write,
1195         TP_PROTO(struct btree_trans *trans),
1196         TP_ARGS(trans),
1197
1198         TP_STRUCT__entry(
1199                 __array(char,                   trans_fn, 32    )
1200         ),
1201
1202         TP_fast_assign(
1203                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1204         ),
1205
1206         TP_printk("%s", __entry->trans_fn)
1207 );
1208
1209 TRACE_EVENT(trans_restart_mem_realloced,
1210         TP_PROTO(struct btree_trans *trans,
1211                  unsigned long caller_ip,
1212                  unsigned long bytes),
1213         TP_ARGS(trans, caller_ip, bytes),
1214
1215         TP_STRUCT__entry(
1216                 __array(char,                   trans_fn, 32    )
1217                 __field(unsigned long,          caller_ip       )
1218                 __field(unsigned long,          bytes           )
1219         ),
1220
1221         TP_fast_assign(
1222                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1223                 __entry->caller_ip      = caller_ip;
1224                 __entry->bytes          = bytes;
1225         ),
1226
1227         TP_printk("%s %pS bytes %lu",
1228                   __entry->trans_fn,
1229                   (void *) __entry->caller_ip,
1230                   __entry->bytes)
1231 );
1232
1233 TRACE_EVENT(trans_restart_key_cache_key_realloced,
1234         TP_PROTO(struct btree_trans *trans,
1235                  unsigned long caller_ip,
1236                  struct btree_path *path,
1237                  unsigned old_u64s,
1238                  unsigned new_u64s),
1239         TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
1240
1241         TP_STRUCT__entry(
1242                 __array(char,                   trans_fn, 32    )
1243                 __field(unsigned long,          caller_ip       )
1244                 __field(enum btree_id,          btree_id        )
1245                 TRACE_BPOS_entries(pos)
1246                 __field(u32,                    old_u64s        )
1247                 __field(u32,                    new_u64s        )
1248         ),
1249
1250         TP_fast_assign(
1251                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1252                 __entry->caller_ip              = caller_ip;
1253
1254                 __entry->btree_id       = path->btree_id;
1255                 TRACE_BPOS_assign(pos, path->pos);
1256                 __entry->old_u64s       = old_u64s;
1257                 __entry->new_u64s       = new_u64s;
1258         ),
1259
1260         TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
1261                   __entry->trans_fn,
1262                   (void *) __entry->caller_ip,
1263                   bch2_btree_id_str(__entry->btree_id),
1264                   __entry->pos_inode,
1265                   __entry->pos_offset,
1266                   __entry->pos_snapshot,
1267                   __entry->old_u64s,
1268                   __entry->new_u64s)
1269 );
1270
1271 TRACE_EVENT(path_downgrade,
1272         TP_PROTO(struct btree_trans *trans,
1273                  unsigned long caller_ip,
1274                  struct btree_path *path,
1275                  unsigned old_locks_want),
1276         TP_ARGS(trans, caller_ip, path, old_locks_want),
1277
1278         TP_STRUCT__entry(
1279                 __array(char,                   trans_fn, 32    )
1280                 __field(unsigned long,          caller_ip       )
1281                 __field(unsigned,               old_locks_want  )
1282                 __field(unsigned,               new_locks_want  )
1283                 __field(unsigned,               btree           )
1284                 TRACE_BPOS_entries(pos)
1285         ),
1286
1287         TP_fast_assign(
1288                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1289                 __entry->caller_ip              = caller_ip;
1290                 __entry->old_locks_want         = old_locks_want;
1291                 __entry->new_locks_want         = path->locks_want;
1292                 __entry->btree                  = path->btree_id;
1293                 TRACE_BPOS_assign(pos, path->pos);
1294         ),
1295
1296         TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
1297                   __entry->trans_fn,
1298                   (void *) __entry->caller_ip,
1299                   __entry->old_locks_want,
1300                   __entry->new_locks_want,
1301                   bch2_btree_id_str(__entry->btree),
1302                   __entry->pos_inode,
1303                   __entry->pos_offset,
1304                   __entry->pos_snapshot)
1305 );
1306
1307 DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
1308         TP_PROTO(struct btree_trans *trans,
1309                  unsigned long caller_ip),
1310         TP_ARGS(trans, caller_ip)
1311 );
1312
1313 TRACE_EVENT(write_buffer_flush,
1314         TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
1315         TP_ARGS(trans, nr, skipped, fast, size),
1316
1317         TP_STRUCT__entry(
1318                 __field(size_t,         nr              )
1319                 __field(size_t,         skipped         )
1320                 __field(size_t,         fast            )
1321                 __field(size_t,         size            )
1322         ),
1323
1324         TP_fast_assign(
1325                 __entry->nr     = nr;
1326                 __entry->skipped = skipped;
1327                 __entry->fast   = fast;
1328                 __entry->size   = size;
1329         ),
1330
1331         TP_printk("%zu/%zu skipped %zu fast %zu",
1332                   __entry->nr, __entry->size, __entry->skipped, __entry->fast)
1333 );
1334
1335 TRACE_EVENT(write_buffer_flush_sync,
1336         TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
1337         TP_ARGS(trans, caller_ip),
1338
1339         TP_STRUCT__entry(
1340                 __array(char,                   trans_fn, 32    )
1341                 __field(unsigned long,          caller_ip       )
1342         ),
1343
1344         TP_fast_assign(
1345                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1346                 __entry->caller_ip              = caller_ip;
1347         ),
1348
1349         TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
1350 );
1351
1352 TRACE_EVENT(write_buffer_flush_slowpath,
1353         TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
1354         TP_ARGS(trans, slowpath, total),
1355
1356         TP_STRUCT__entry(
1357                 __field(size_t,         slowpath        )
1358                 __field(size_t,         total           )
1359         ),
1360
1361         TP_fast_assign(
1362                 __entry->slowpath       = slowpath;
1363                 __entry->total          = total;
1364         ),
1365
1366         TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
1367 );
1368
1369 #endif /* _TRACE_BCACHEFS_H */
1370
1371 /* This part must be outside protection */
1372 #undef TRACE_INCLUDE_PATH
1373 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1374
1375 #undef TRACE_INCLUDE_FILE
1376 #define TRACE_INCLUDE_FILE trace
1377
1378 #include <trace/define_trace.h>