/*
 * (gitweb capture header, preserved as a comment so the file stays parseable)
 * git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/trace.h
 * Update bcachefs sources to b9bd69421f73 bcachefs: x-macro-ify inode flags enum
 * [bcachefs-tools-debian] / libbcachefs / trace.h
 */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7
8 #include <linux/tracepoint.h>
9
/*
 * TRACE_BPOS_entries()/TRACE_BPOS_assign(): declare and fill the three
 * components of a struct bpos (inode, offset, snapshot) in a trace event
 * entry, each field prefixed with "name"/"dst".
 */
10 #define TRACE_BPOS_entries(name)                                \
11         __field(u64,                    name##_inode    )       \
12         __field(u64,                    name##_offset   )       \
13         __field(u32,                    name##_snapshot )
14
15 #define TRACE_BPOS_assign(dst, src)                             \
16         __entry->dst##_inode            = (src).inode;          \
17         __entry->dst##_offset           = (src).offset;         \
18         __entry->dst##_snapshot         = (src).snapshot
19
/* Event class: log a single btree position as inode:offset:snapshot. */
20 DECLARE_EVENT_CLASS(bpos,
21         TP_PROTO(const struct bpos *p),
22         TP_ARGS(p),
23
24         TP_STRUCT__entry(
25                 TRACE_BPOS_entries(p)
26         ),
27
28         TP_fast_assign(
29                 TRACE_BPOS_assign(p, *p);
30         ),
31
32         TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
33 );
34
/*
 * Event class: log a bkey the caller has already formatted as a string.
 * The bch_fs argument is unused here; it is kept for a uniform signature.
 */
35 DECLARE_EVENT_CLASS(bkey,
36         TP_PROTO(struct bch_fs *c, const char *k),
37         TP_ARGS(c, k),
38
39         TP_STRUCT__entry(
40                 __string(k,     k                               )
41         ),
42
43         TP_fast_assign(
44                 __assign_str(k, k);
45         ),
46
47         TP_printk("%s", __get_str(k))
48 );
49
/*
 * Event class: log the filesystem device, a btree node's level and btree id,
 * and the node's key position.
 */
50 DECLARE_EVENT_CLASS(btree_node,
51         TP_PROTO(struct bch_fs *c, struct btree *b),
52         TP_ARGS(c, b),
53
54         TP_STRUCT__entry(
55                 __field(dev_t,          dev                     )
56                 __field(u8,             level                   )
57                 __field(u8,             btree_id                )
58                 TRACE_BPOS_entries(pos)
59         ),
60
61         TP_fast_assign(
62                 __entry->dev            = c->dev;
63                 __entry->level          = b->c.level;
64                 __entry->btree_id       = b->c.btree_id;
65                 TRACE_BPOS_assign(pos, b->key.k.p);
66         ),
67
68         TP_printk("%d,%d %u %s %llu:%llu:%u",
69                   MAJOR(__entry->dev), MINOR(__entry->dev),
70                   __entry->level,
71                   bch2_btree_id_str(__entry->btree_id),
72                   __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
73 );
74
/* Event class: log only the filesystem's device number (major,minor). */
75 DECLARE_EVENT_CLASS(bch_fs,
76         TP_PROTO(struct bch_fs *c),
77         TP_ARGS(c),
78
79         TP_STRUCT__entry(
80                 __field(dev_t,          dev                     )
81         ),
82
83         TP_fast_assign(
84                 __entry->dev            = c->dev;
85         ),
86
87         TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
88 );
89
/*
 * Event class: log a bio's device, start sector, size in sectors, and
 * op flags rendered via blk_fill_rwbs().  dev falls back to 0 when the
 * bio has no backing bdev.
 */
90 DECLARE_EVENT_CLASS(bio,
91         TP_PROTO(struct bio *bio),
92         TP_ARGS(bio),
93
94         TP_STRUCT__entry(
95                 __field(dev_t,          dev                     )
96                 __field(sector_t,       sector                  )
97                 __field(unsigned int,   nr_sector               )
98                 __array(char,           rwbs,   6               )
99         ),
100
101         TP_fast_assign(
102                 __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
103                 __entry->sector         = bio->bi_iter.bi_sector;
104                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
105                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
106         ),
107
108         TP_printk("%d,%d  %s %llu + %u",
109                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
110                   (unsigned long long)__entry->sector, __entry->nr_sector)
111 );
112
113 /* super-io.c: */
/* Superblock write: records the device and the caller's instruction pointer. */
114 TRACE_EVENT(write_super,
115         TP_PROTO(struct bch_fs *c, unsigned long ip),
116         TP_ARGS(c, ip),
117
118         TP_STRUCT__entry(
119                 __field(dev_t,          dev     )
120                 __field(unsigned long,  ip      )
121         ),
122
123         TP_fast_assign(
124                 __entry->dev            = c->dev;
125                 __entry->ip             = ip;
126         ),
127
128         TP_printk("%d,%d for %pS",
129                   MAJOR(__entry->dev), MINOR(__entry->dev),
130                   (void *) __entry->ip)
131 );
132
133 /* io.c: */
134
/* A read triggered promotion of the data to a faster tier. */
135 DEFINE_EVENT(bio, read_promote,
136         TP_PROTO(struct bio *bio),
137         TP_ARGS(bio)
138 );
139
/*
 * A read was not promoted: records the reason as an error string
 * (bch2_err_str()), truncated to 32 bytes by strscpy().
 */
140 TRACE_EVENT(read_nopromote,
141         TP_PROTO(struct bch_fs *c, int ret),
142         TP_ARGS(c, ret),
143
144         TP_STRUCT__entry(
145                 __field(dev_t,          dev             )
146                 __array(char,           ret, 32         )
147         ),
148
149         TP_fast_assign(
150                 __entry->dev            = c->dev;
151                 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
152         ),
153
154         TP_printk("%d,%d ret %s",
155                   MAJOR(__entry->dev), MINOR(__entry->dev),
156                   __entry->ret)
157 );
158
/* Read-path events, all sharing the bio event class. */
159 DEFINE_EVENT(bio, read_bounce,
160         TP_PROTO(struct bio *bio),
161         TP_ARGS(bio)
162 );
163
164 DEFINE_EVENT(bio, read_split,
165         TP_PROTO(struct bio *bio),
166         TP_ARGS(bio)
167 );
168
169 DEFINE_EVENT(bio, read_retry,
170         TP_PROTO(struct bio *bio),
171         TP_ARGS(bio)
172 );
173
174 DEFINE_EVENT(bio, read_reuse_race,
175         TP_PROTO(struct bio *bio),
176         TP_ARGS(bio)
177 );
178
179 /* Journal */
180
/* Journal events: space exhaustion (whole journal / single entry) and writes. */
181 DEFINE_EVENT(bch_fs, journal_full,
182         TP_PROTO(struct bch_fs *c),
183         TP_ARGS(c)
184 );
185
186 DEFINE_EVENT(bch_fs, journal_entry_full,
187         TP_PROTO(struct bch_fs *c),
188         TP_ARGS(c)
189 );
190
191 DEFINE_EVENT(bio, journal_write,
192         TP_PROTO(struct bio *bio),
193         TP_ARGS(bio)
194 );
195
/*
 * Start of a journal reclaim pass: records whether it was direct/kicked,
 * the flush targets (min_nr, min_key_cache), prereserved space, and
 * dirty/total counts for the btree cache and btree key cache.
 */
196 TRACE_EVENT(journal_reclaim_start,
197         TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
198                  u64 min_nr, u64 min_key_cache,
199                  u64 prereserved, u64 prereserved_total,
200                  u64 btree_cache_dirty, u64 btree_cache_total,
201                  u64 btree_key_cache_dirty, u64 btree_key_cache_total),
202         TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
203                 btree_cache_dirty, btree_cache_total,
204                 btree_key_cache_dirty, btree_key_cache_total),
205
206         TP_STRUCT__entry(
207                 __field(dev_t,          dev                     )
208                 __field(bool,           direct                  )
209                 __field(bool,           kicked                  )
210                 __field(u64,            min_nr                  )
211                 __field(u64,            min_key_cache           )
212                 __field(u64,            prereserved             )
213                 __field(u64,            prereserved_total       )
214                 __field(u64,            btree_cache_dirty       )
215                 __field(u64,            btree_cache_total       )
216                 __field(u64,            btree_key_cache_dirty   )
217                 __field(u64,            btree_key_cache_total   )
218         ),
219
220         TP_fast_assign(
221                 __entry->dev                    = c->dev;
222                 __entry->direct                 = direct;
223                 __entry->kicked                 = kicked;
224                 __entry->min_nr                 = min_nr;
225                 __entry->min_key_cache          = min_key_cache;
226                 __entry->prereserved            = prereserved;
227                 __entry->prereserved_total      = prereserved_total;
228                 __entry->btree_cache_dirty      = btree_cache_dirty;
229                 __entry->btree_cache_total      = btree_cache_total;
230                 __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
231                 __entry->btree_key_cache_total  = btree_key_cache_total;
232         ),
233
234         TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
235                   MAJOR(__entry->dev), MINOR(__entry->dev),
236                   __entry->direct,
237                   __entry->kicked,
238                   __entry->min_nr,
239                   __entry->min_key_cache,
240                   __entry->prereserved,
241                   __entry->prereserved_total,
242                   __entry->btree_cache_dirty,
243                   __entry->btree_cache_total,
244                   __entry->btree_key_cache_dirty,
245                   __entry->btree_key_cache_total)
246 );
247
/* End of a journal reclaim pass: records how many entries were flushed. */
248 TRACE_EVENT(journal_reclaim_finish,
249         TP_PROTO(struct bch_fs *c, u64 nr_flushed),
250         TP_ARGS(c, nr_flushed),
251
252         TP_STRUCT__entry(
253                 __field(dev_t,          dev                     )
254                 __field(u64,            nr_flushed              )
255         ),
256
257         TP_fast_assign(
258                 __entry->dev            = c->dev;
259                 __entry->nr_flushed     = nr_flushed;
260         ),
261
262         TP_printk("%d,%d flushed %llu",
263                   MAJOR(__entry->dev), MINOR(__entry->dev),
264                   __entry->nr_flushed)
265 );
266
267 /* bset.c: */
268
/* A bpos could not be packed into the btree node's key format. */
269 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
270         TP_PROTO(const struct bpos *p),
271         TP_ARGS(p)
272 );
273
274 /* Btree cache: */
275
/* Btree cache shrinker scan: requested count, freeable count, and result. */
276 TRACE_EVENT(btree_cache_scan,
277         TP_PROTO(long nr_to_scan, long can_free, long ret),
278         TP_ARGS(nr_to_scan, can_free, ret),
279
280         TP_STRUCT__entry(
281                 __field(long,   nr_to_scan              )
282                 __field(long,   can_free                )
283                 __field(long,   ret                     )
284         ),
285
286         TP_fast_assign(
287                 __entry->nr_to_scan     = nr_to_scan;
288                 __entry->can_free       = can_free;
289                 __entry->ret            = ret;
290         ),
291
292         TP_printk("scanned for %li nodes, can free %li, ret %li",
293                   __entry->nr_to_scan, __entry->can_free, __entry->ret)
294 );
295
/* Btree cache reclaim and cannibalize-lock lifecycle events. */
296 DEFINE_EVENT(btree_node, btree_cache_reap,
297         TP_PROTO(struct bch_fs *c, struct btree *b),
298         TP_ARGS(c, b)
299 );
300
301 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
302         TP_PROTO(struct bch_fs *c),
303         TP_ARGS(c)
304 );
305
306 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
307         TP_PROTO(struct bch_fs *c),
308         TP_ARGS(c)
309 );
310
311 DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
312         TP_PROTO(struct bch_fs *c),
313         TP_ARGS(c)
314 );
315
316 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
317         TP_PROTO(struct bch_fs *c),
318         TP_ARGS(c)
319 );
320
321 /* Btree */
322
/* A btree node was read from disk. */
323 DEFINE_EVENT(btree_node, btree_node_read,
324         TP_PROTO(struct bch_fs *c, struct btree *b),
325         TP_ARGS(c, b)
326 );
327
/* A btree node write: records the node type and the bytes/sectors written. */
328 TRACE_EVENT(btree_node_write,
329         TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
330         TP_ARGS(b, bytes, sectors),
331
332         TP_STRUCT__entry(
333                 __field(enum btree_node_type,   type)
334                 __field(unsigned,       bytes                   )
335                 __field(unsigned,       sectors                 )
336         ),
337
338         TP_fast_assign(
339                 __entry->type   = btree_node_type(b);
340                 __entry->bytes  = bytes;
341                 __entry->sectors = sectors;
342         ),
343
344         TP_printk("bkey type %u bytes %u sectors %u",
345                   __entry->type , __entry->bytes, __entry->sectors)
346 );
347
/* Btree node allocation / free events. */
348 DEFINE_EVENT(btree_node, btree_node_alloc,
349         TP_PROTO(struct bch_fs *c, struct btree *b),
350         TP_ARGS(c, b)
351 );
352
353 DEFINE_EVENT(btree_node, btree_node_free,
354         TP_PROTO(struct bch_fs *c, struct btree *b),
355         TP_ARGS(c, b)
356 );
357
/*
 * Failed to get a btree node reserve: records the transaction function
 * name, caller ip, nodes required, and the error as a string (both string
 * fields truncated to 32 bytes by strscpy()).
 */
358 TRACE_EVENT(btree_reserve_get_fail,
359         TP_PROTO(const char *trans_fn,
360                  unsigned long caller_ip,
361                  size_t required,
362                  int ret),
363         TP_ARGS(trans_fn, caller_ip, required, ret),
364
365         TP_STRUCT__entry(
366                 __array(char,                   trans_fn, 32    )
367                 __field(unsigned long,          caller_ip       )
368                 __field(size_t,                 required        )
369                 __array(char,                   ret, 32         )
370         ),
371
372         TP_fast_assign(
373                 strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
374                 __entry->caller_ip      = caller_ip;
375                 __entry->required       = required;
376                 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
377         ),
378
379         TP_printk("%s %pS required %zu ret %s",
380                   __entry->trans_fn,
381                   (void *) __entry->caller_ip,
382                   __entry->required,
383                   __entry->ret)
384 );
385
/* Btree node restructuring events: compact, merge, split, rewrite, new root. */
386 DEFINE_EVENT(btree_node, btree_node_compact,
387         TP_PROTO(struct bch_fs *c, struct btree *b),
388         TP_ARGS(c, b)
389 );
390
391 DEFINE_EVENT(btree_node, btree_node_merge,
392         TP_PROTO(struct bch_fs *c, struct btree *b),
393         TP_ARGS(c, b)
394 );
395
396 DEFINE_EVENT(btree_node, btree_node_split,
397         TP_PROTO(struct bch_fs *c, struct btree *b),
398         TP_ARGS(c, b)
399 );
400
401 DEFINE_EVENT(btree_node, btree_node_rewrite,
402         TP_PROTO(struct bch_fs *c, struct btree *b),
403         TP_ARGS(c, b)
404 );
405
406 DEFINE_EVENT(btree_node, btree_node_set_root,
407         TP_PROTO(struct bch_fs *c, struct btree *b),
408         TP_ARGS(c, b)
409 );
410
411 TRACE_EVENT(btree_path_relock_fail,
412         TP_PROTO(struct btree_trans *trans,
413                  unsigned long caller_ip,
414                  struct btree_path *path,
415                  unsigned level),
416         TP_ARGS(trans, caller_ip, path, level),
417
418         TP_STRUCT__entry(
419                 __array(char,                   trans_fn, 32    )
420                 __field(unsigned long,          caller_ip       )
421                 __field(u8,                     btree_id        )
422                 __field(u8,                     level           )
423                 TRACE_BPOS_entries(pos)
424                 __array(char,                   node, 24        )
425                 __field(u8,                     self_read_count )
426                 __field(u8,                     self_intent_count)
427                 __field(u8,                     read_count      )
428                 __field(u8,                     intent_count    )
429                 __field(u32,                    iter_lock_seq   )
430                 __field(u32,                    node_lock_seq   )
431         ),
432
433         TP_fast_assign(
434                 struct btree *b = btree_path_node(path, level);
435                 struct six_lock_count c;
436
437                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
438                 __entry->caller_ip              = caller_ip;
439                 __entry->btree_id               = path->btree_id;
440                 __entry->level                  = path->level;
441                 TRACE_BPOS_assign(pos, path->pos);
442
443                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
444                 __entry->self_read_count        = c.n[SIX_LOCK_read];
445                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
446
447                 if (IS_ERR(b)) {
448                         strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
449                 } else {
450                         c = six_lock_counts(&path->l[level].b->c.lock);
451                         __entry->read_count     = c.n[SIX_LOCK_read];
452                         __entry->intent_count   = c.n[SIX_LOCK_intent];
453                         scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
454                 }
455                 __entry->iter_lock_seq          = path->l[level].lock_seq;
456                 __entry->node_lock_seq          = is_btree_node(path, level)
457                         ? six_lock_seq(&path->l[level].b->c.lock)
458                         : 0;
459         ),
460
461         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
462                   __entry->trans_fn,
463                   (void *) __entry->caller_ip,
464                   bch2_btree_id_str(__entry->btree_id),
465                   __entry->pos_inode,
466                   __entry->pos_offset,
467                   __entry->pos_snapshot,
468                   __entry->level,
469                   __entry->node,
470                   __entry->self_read_count,
471                   __entry->self_intent_count,
472                   __entry->read_count,
473                   __entry->intent_count,
474                   __entry->iter_lock_seq,
475                   __entry->node_lock_seq)
476 );
477
/*
 * Failed to upgrade a btree path's lock at @level: records the transaction,
 * caller, btree/position, whether the node was locked, and the self/total
 * read+intent hold counts plus lock sequence numbers.
 *
 * Fix vs. previous version: the bch2_btree_node_lock_counts() statement
 * ended with a comma operator instead of a semicolon.
 */
478 TRACE_EVENT(btree_path_upgrade_fail,
479         TP_PROTO(struct btree_trans *trans,
480                  unsigned long caller_ip,
481                  struct btree_path *path,
482                  unsigned level),
483         TP_ARGS(trans, caller_ip, path, level),
484
485         TP_STRUCT__entry(
486                 __array(char,                   trans_fn, 32    )
487                 __field(unsigned long,          caller_ip       )
488                 __field(u8,                     btree_id        )
489                 __field(u8,                     level           )
490                 TRACE_BPOS_entries(pos)
491                 __field(u8,                     locked          )
492                 __field(u8,                     self_read_count )
493                 __field(u8,                     self_intent_count)
494                 __field(u8,                     read_count      )
495                 __field(u8,                     intent_count    )
496                 __field(u32,                    iter_lock_seq   )
497                 __field(u32,                    node_lock_seq   )
498         ),
499
500         TP_fast_assign(
501                 struct six_lock_count c;
502
503                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
504                 __entry->caller_ip              = caller_ip;
505                 __entry->btree_id               = path->btree_id;
506                 __entry->level                  = level;
507                 TRACE_BPOS_assign(pos, path->pos);
508                 __entry->locked                 = btree_node_locked(path, level);
509
510                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
511                 __entry->self_read_count        = c.n[SIX_LOCK_read];
512                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
513                 c = six_lock_counts(&path->l[level].b->c.lock);
514                 __entry->read_count             = c.n[SIX_LOCK_read];
515                 __entry->intent_count           = c.n[SIX_LOCK_intent];
516                 __entry->iter_lock_seq          = path->l[level].lock_seq;
517                 __entry->node_lock_seq          = is_btree_node(path, level)
518                         ? six_lock_seq(&path->l[level].b->c.lock)
519                         : 0;
520         ),
521
522         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
523                   __entry->trans_fn,
524                   (void *) __entry->caller_ip,
525                   bch2_btree_id_str(__entry->btree_id),
526                   __entry->pos_inode,
527                   __entry->pos_offset,
528                   __entry->pos_snapshot,
529                   __entry->level,
530                   __entry->locked,
531                   __entry->self_read_count,
532                   __entry->self_intent_count,
533                   __entry->read_count,
534                   __entry->intent_count,
535                   __entry->iter_lock_seq,
536                   __entry->node_lock_seq)
537 );
538
539 /* Garbage collection */
540
/* Start/end of a bucket-generation garbage collection pass. */
541 DEFINE_EVENT(bch_fs, gc_gens_start,
542         TP_PROTO(struct bch_fs *c),
543         TP_ARGS(c)
544 );
545
546 DEFINE_EVENT(bch_fs, gc_gens_end,
547         TP_PROTO(struct bch_fs *c),
548         TP_ARGS(c)
549 );
550
551 /* Allocator */
552
/*
 * Event class for bucket allocation attempts: records the device index,
 * reserve name (truncated to 16 bytes), the bucket, free/available counts,
 * copygc wait state, per-attempt skip counters from bucket_alloc_state,
 * blocking mode, and the outcome as a string (truncated to 32 bytes).
 */
553 DECLARE_EVENT_CLASS(bucket_alloc,
554         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
555                  u64 bucket,
556                  u64 free,
557                  u64 avail,
558                  u64 copygc_wait_amount,
559                  s64 copygc_waiting_for,
560                  struct bucket_alloc_state *s,
561                  bool nonblocking,
562                  const char *err),
563         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
564                 copygc_wait_amount, copygc_waiting_for,
565                 s, nonblocking, err),
566
567         TP_STRUCT__entry(
568                 __field(u8,                     dev                     )
569                 __array(char,   reserve,        16                      )
570                 __field(u64,                    bucket  )
571                 __field(u64,                    free                    )
572                 __field(u64,                    avail                   )
573                 __field(u64,                    copygc_wait_amount      )
574                 __field(s64,                    copygc_waiting_for      )
575                 __field(u64,                    seen                    )
576                 __field(u64,                    open                    )
577                 __field(u64,                    need_journal_commit     )
578                 __field(u64,                    nouse                   )
579                 __field(bool,                   nonblocking             )
580                 __field(u64,                    nocow                   )
581                 __array(char,                   err,    32              )
582         ),
583
584         TP_fast_assign(
585                 __entry->dev            = ca->dev_idx;
586                 strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
587                 __entry->bucket         = bucket;
588                 __entry->free           = free;
589                 __entry->avail          = avail;
590                 __entry->copygc_wait_amount     = copygc_wait_amount;
591                 __entry->copygc_waiting_for     = copygc_waiting_for;
592                 __entry->seen           = s->buckets_seen;
593                 __entry->open           = s->skipped_open;
594                 __entry->need_journal_commit = s->skipped_need_journal_commit;
595                 __entry->nouse          = s->skipped_nouse;
596                 __entry->nonblocking    = nonblocking;
597                 __entry->nocow          = s->skipped_nocow;
598                 strscpy(__entry->err, err, sizeof(__entry->err));
599         ),
600
601         TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
602                   __entry->reserve,
603                   __entry->dev,
604                   __entry->bucket,
605                   __entry->free,
606                   __entry->avail,
607                   __entry->copygc_wait_amount,
608                   __entry->copygc_waiting_for,
609                   __entry->seen,
610                   __entry->open,
611                   __entry->need_journal_commit,
612                   __entry->nouse,
613                   __entry->nocow,
614                   __entry->nonblocking,
615                   __entry->err)
616 );
617
/* Successful and failed bucket allocations, sharing the bucket_alloc class. */
618 DEFINE_EVENT(bucket_alloc, bucket_alloc,
619         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
620                  u64 bucket,
621                  u64 free,
622                  u64 avail,
623                  u64 copygc_wait_amount,
624                  s64 copygc_waiting_for,
625                  struct bucket_alloc_state *s,
626                  bool nonblocking,
627                  const char *err),
628         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
629                 copygc_wait_amount, copygc_waiting_for,
630                 s, nonblocking, err)
631 );
632
633 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
634         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
635                  u64 bucket,
636                  u64 free,
637                  u64 avail,
638                  u64 copygc_wait_amount,
639                  s64 copygc_waiting_for,
640                  struct bucket_alloc_state *s,
641                  bool nonblocking,
642                  const char *err),
643         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
644                 copygc_wait_amount, copygc_waiting_for,
645                 s, nonblocking, err)
646 );
647
/*
 * Bucket discard pass summary: buckets seen/open/waiting-on-journal and
 * how many were discarded, plus an error string (truncated to 16 bytes).
 *
 * Fix vs. previous version: the format string printed the device as
 * "%d%d" (major and minor run together); use "%d,%d" like every other
 * event in this file.
 */
648 TRACE_EVENT(discard_buckets,
649         TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
650                  u64 need_journal_commit, u64 discarded, const char *err),
651         TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
652
653         TP_STRUCT__entry(
654                 __field(dev_t,          dev                     )
655                 __field(u64,            seen                    )
656                 __field(u64,            open                    )
657                 __field(u64,            need_journal_commit     )
658                 __field(u64,            discarded               )
659                 __array(char,           err,    16              )
660         ),
661
662         TP_fast_assign(
663                 __entry->dev                    = c->dev;
664                 __entry->seen                   = seen;
665                 __entry->open                   = open;
666                 __entry->need_journal_commit    = need_journal_commit;
667                 __entry->discarded              = discarded;
668                 strscpy(__entry->err, err, sizeof(__entry->err));
669         ),
670
671         TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
672                   MAJOR(__entry->dev), MINOR(__entry->dev),
673                   __entry->seen,
674                   __entry->open,
675                   __entry->need_journal_commit,
676                   __entry->discarded,
677                   __entry->err)
678 );
679
/*
 * A cached-data bucket was invalidated: records the member device index,
 * bucket number, and how many cached sectors it held.
 */
680 TRACE_EVENT(bucket_invalidate,
681         TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
682         TP_ARGS(c, dev, bucket, sectors),
683
684         TP_STRUCT__entry(
685                 __field(dev_t,          dev                     )
686                 __field(u32,            dev_idx                 )
687                 __field(u32,            sectors                 )
688                 __field(u64,            bucket                  )
689         ),
690
691         TP_fast_assign(
692                 __entry->dev            = c->dev;
693                 __entry->dev_idx        = dev;
694                 __entry->sectors        = sectors;
695                 __entry->bucket         = bucket;
696         ),
697
698         TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
699                   MAJOR(__entry->dev), MINOR(__entry->dev),
700                   __entry->dev_idx, __entry->bucket,
701                   __entry->sectors)
702 );
703
704 /* Moving IO */
705
/*
 * Start of bucket evacuation: the bucket bpos encodes the member device
 * index in .inode and the bucket number in .offset.
 */
706 TRACE_EVENT(bucket_evacuate,
707         TP_PROTO(struct bch_fs *c, struct bpos *bucket),
708         TP_ARGS(c, bucket),
709
710         TP_STRUCT__entry(
711                 __field(dev_t,          dev                     )
712                 __field(u32,            dev_idx                 )
713                 __field(u64,            bucket                  )
714         ),
715
716         TP_fast_assign(
717                 __entry->dev            = c->dev;
718                 __entry->dev_idx        = bucket->inode;
719                 __entry->bucket         = bucket->offset;
720         ),
721
722         TP_printk("%d:%d %u:%llu",
723                   MAJOR(__entry->dev), MINOR(__entry->dev),
724                   __entry->dev_idx, __entry->bucket)
725 );
726
/* Data-move pipeline stages, each logging the extent key as a string. */
727 DEFINE_EVENT(bkey, move_extent,
728         TP_PROTO(struct bch_fs *c, const char *k),
729         TP_ARGS(c, k)
730 );
731
732 DEFINE_EVENT(bkey, move_extent_read,
733         TP_PROTO(struct bch_fs *c, const char *k),
734         TP_ARGS(c, k)
735 );
736
737 DEFINE_EVENT(bkey, move_extent_write,
738         TP_PROTO(struct bch_fs *c, const char *k),
739         TP_ARGS(c, k)
740 );
741
742 DEFINE_EVENT(bkey, move_extent_finish,
743         TP_PROTO(struct bch_fs *c, const char *k),
744         TP_ARGS(c, k)
745 );
746
/* An extent move failed: records the caller-supplied reason string. */
747 TRACE_EVENT(move_extent_fail,
748         TP_PROTO(struct bch_fs *c, const char *msg),
749         TP_ARGS(c, msg),
750
751         TP_STRUCT__entry(
752                 __field(dev_t,          dev                     )
753                 __string(msg,           msg                     )
754         ),
755
756         TP_fast_assign(
757                 __entry->dev            = c->dev;
758                 __assign_str(msg, msg);
759         ),
760
761         TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
762 );
763
/* Memory allocation failed while setting up an extent move. */
764 DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
765         TP_PROTO(struct bch_fs *c, const char *k),
766         TP_ARGS(c, k)
767 );
768
/*
 * Summary of a data-move operation, snapshotting the atomic counters
 * from bch_move_stats.
 *
 * Fix vs. previous version: the two concatenated TP_printk string
 * literals had no separating space, producing "raced %llusectors seen".
 */
769 TRACE_EVENT(move_data,
770         TP_PROTO(struct bch_fs *c,
771                  struct bch_move_stats *stats),
772         TP_ARGS(c, stats),
773
774         TP_STRUCT__entry(
775                 __field(dev_t,          dev             )
776                 __field(u64,            keys_moved      )
777                 __field(u64,            keys_raced      )
778                 __field(u64,            sectors_seen    )
779                 __field(u64,            sectors_moved   )
780                 __field(u64,            sectors_raced   )
781         ),
782
783         TP_fast_assign(
784                 __entry->dev            = c->dev;
785                 __entry->keys_moved     = atomic64_read(&stats->keys_moved);
786                 __entry->keys_raced     = atomic64_read(&stats->keys_raced);
787                 __entry->sectors_seen   = atomic64_read(&stats->sectors_seen);
788                 __entry->sectors_moved  = atomic64_read(&stats->sectors_moved);
789                 __entry->sectors_raced  = atomic64_read(&stats->sectors_raced);
790         ),
791
792         TP_printk("%d,%d keys moved %llu raced %llu "
793                   "sectors seen %llu moved %llu raced %llu",
794                   MAJOR(__entry->dev), MINOR(__entry->dev),
795                   __entry->keys_moved,
796                   __entry->keys_raced,
797                   __entry->sectors_seen,
798                   __entry->sectors_moved,
799                   __entry->sectors_raced)
800 );
801
/*
 * Result of evacuating one bucket: the bucket bpos encodes member device
 * in .inode and bucket number in .offset; records sectors moved out of
 * bucket_size, the fragmentation metric, and the return code.
 */
802 TRACE_EVENT(evacuate_bucket,
803         TP_PROTO(struct bch_fs *c, struct bpos *bucket,
804                  unsigned sectors, unsigned bucket_size,
805                  u64 fragmentation, int ret),
806         TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),
807
808         TP_STRUCT__entry(
809                 __field(dev_t,          dev             )
810                 __field(u64,            member          )
811                 __field(u64,            bucket          )
812                 __field(u32,            sectors         )
813                 __field(u32,            bucket_size     )
814                 __field(u64,            fragmentation   )
815                 __field(int,            ret             )
816         ),
817
818         TP_fast_assign(
819                 __entry->dev                    = c->dev;
820                 __entry->member                 = bucket->inode;
821                 __entry->bucket                 = bucket->offset;
822                 __entry->sectors                = sectors;
823                 __entry->bucket_size            = bucket_size;
824                 __entry->fragmentation          = fragmentation;
825                 __entry->ret                    = ret;
826         ),
827
828         TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
829                   MAJOR(__entry->dev), MINOR(__entry->dev),
830                   __entry->member, __entry->bucket,
831                   __entry->sectors, __entry->bucket_size,
832                   __entry->fragmentation, __entry->ret)
833 );
834
835 TRACE_EVENT(copygc,
836         TP_PROTO(struct bch_fs *c,
837                  u64 sectors_moved, u64 sectors_not_moved,
838                  u64 buckets_moved, u64 buckets_not_moved),
839         TP_ARGS(c,
840                 sectors_moved, sectors_not_moved,
841                 buckets_moved, buckets_not_moved),
842
843         TP_STRUCT__entry(
844                 __field(dev_t,          dev                     )
845                 __field(u64,            sectors_moved           )
846                 __field(u64,            sectors_not_moved       )
847                 __field(u64,            buckets_moved           )
848                 __field(u64,            buckets_not_moved       )
849         ),
850
851         TP_fast_assign(
852                 __entry->dev                    = c->dev;
853                 __entry->sectors_moved          = sectors_moved;
854                 __entry->sectors_not_moved      = sectors_not_moved;
855                 __entry->buckets_moved          = buckets_moved;
856                 __entry->buckets_not_moved = buckets_moved;
857         ),
858
859         TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
860                   MAJOR(__entry->dev), MINOR(__entry->dev),
861                   __entry->sectors_moved, __entry->sectors_not_moved,
862                   __entry->buckets_moved, __entry->buckets_not_moved)
863 );
864
865 TRACE_EVENT(copygc_wait,
866         TP_PROTO(struct bch_fs *c,
867                  u64 wait_amount, u64 until),
868         TP_ARGS(c, wait_amount, until),
869
870         TP_STRUCT__entry(
871                 __field(dev_t,          dev                     )
872                 __field(u64,            wait_amount             )
873                 __field(u64,            until                   )
874         ),
875
876         TP_fast_assign(
877                 __entry->dev            = c->dev;
878                 __entry->wait_amount    = wait_amount;
879                 __entry->until          = until;
880         ),
881
882         TP_printk("%d,%u waiting for %llu sectors until %llu",
883                   MAJOR(__entry->dev), MINOR(__entry->dev),
884                   __entry->wait_amount, __entry->until)
885 );
886
887 /* btree transactions: */
888
/*
 * transaction_event - base class for btree transaction events.
 *
 * Captures the transaction's function name (truncated to 32 bytes via
 * strscpy) and the caller's instruction pointer, printed as "%s %pS".
 */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
906
/* Fired on transaction commit (transaction_event: trans fn + caller IP). */
DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
912
/* Transaction restart due to injection (per name; transaction_event class). */
DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
918
/*
 * trans_restart_split_race - transaction restarted after racing on a node
 * split.
 *
 * Records the node's level, how much of it is written (out of the fs's
 * btree block count), and the u64s remaining in it.
 */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch_btree_keys_u64s_remaining(trans->c, b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
949
/* Transaction blocked on journal reclaim (transaction_event class). */
DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
955
/*
 * trans_restart_journal_preres_get - restart while getting a journal
 * pre-reservation; records the flags passed (printed in hex).
 */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
978
/* Transaction restart from fault injection (transaction_event class). */
DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
984
/* Transaction re-traversing all paths (transaction_event class). */
DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
990
/* Restart after racing on the key cache (transaction_event class). */
DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
996
/* Restart because the transaction used too many iterators (per name). */
DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1002
/*
 * transaction_restart_iter - base class for restarts tied to a btree_path.
 *
 * Extends the trans fn / caller IP pair with the path's btree id and
 * position (inode:offset:snapshot).
 */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1031
/* Restart: btree node was reused out from under us (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1038
/* Restart: btree node was split (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1045
1046 struct get_locks_fail;
1047
/*
 * trans_restart_upgrade - restart while upgrading locks on a path.
 *
 * Records the locks_want transition, the failing level from @f, and the
 * path/node lock sequence numbers at that level (node seq is 0 when f->b
 * is an error pointer or NULL).
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		__field(u32,			path_alloc_seq	)
		__field(u32,			downgrade_seq)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		/* f->l is the level at which taking locks failed */
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		__entry->path_alloc_seq		= path->alloc_seq;
		__entry->downgrade_seq		= path->downgrade_seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq,
		  __entry->path_alloc_seq,
		  __entry->downgrade_seq)
);
1100
/* Restart while relocking a path (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1107
/* Restart while relocking for the next node (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1114
/* Restart while relocking the parent for a node fill (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1121
/* Restart while relocking after a node fill (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1128
/* Restart while upgrading key cache locks (transaction_event class). */
DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1134
/* Restart while relocking for a key cache fill (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1141
/* Restart while relocking a specific path (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1148
/* Restart while relocking a path with intent locks (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1155
/* Restart during path traversal (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1162
/* Restart after a memory allocation failure (restart_iter class). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1169
/* Restart to avoid a lock-ordering deadlock (transaction_event class). */
DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1175
/* Deadlock-avoidance restart after hitting the recursion limit (per name). */
DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1181
/*
 * trans_restart_would_deadlock_write - deadlock-avoidance restart on the
 * write path; records only the transaction's function name (no caller IP).
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1196
/*
 * trans_restart_mem_realloced - restart after the transaction's preallocated
 * memory was reallocated; records the new size in bytes.
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1220
/*
 * trans_restart_key_cache_key_realloced - restart after a key cache entry
 * was reallocated; records the path's btree/pos and the old and new key
 * sizes in u64s.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1258
/*
 * path_downgrade - a btree path's locks were downgraded.
 *
 * NOTE(review): @path is accepted but nothing from it is recorded — only
 * trans fn and caller IP are captured. Possibly intentional (kept for API
 * symmetry) — confirm before extending.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip)
);
1279
/* Restart to flush the btree write buffer (transaction_event class). */
DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1285
/*
 * write_buffer_flush - stats for a write buffer flush: @nr entries of @size
 * total, @skipped skipped, @fast taken via the fast path.
 * (@trans is accepted but not recorded.)
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1307
/*
 * write_buffer_flush_slowpath - write buffer flush fell back to the slow
 * path; records @nr entries out of @size. (@trans is accepted but not
 * recorded.)
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
	TP_ARGS(trans, nr, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu", __entry->nr, __entry->size)
);
1324
1325 #endif /* _TRACE_BCACHEFS_H */
1326
1327 /* This part must be outside protection */
1328 #undef TRACE_INCLUDE_PATH
1329 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1330
1331 #undef TRACE_INCLUDE_FILE
1332 #define TRACE_INCLUDE_FILE trace
1333
1334 #include <trace/define_trace.h>