]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/trace.h
Update bcachefs sources to c3e4d892b77b mean and variance: Promote to lib/math
[bcachefs-tools-debian] / libbcachefs / trace.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7
8 #include <linux/tracepoint.h>
9
/*
 * Helpers for embedding a struct bpos in a tracepoint entry:
 * TRACE_BPOS_entries() expands to the three __field() declarations
 * (named <name>_inode/_offset/_snapshot), and TRACE_BPOS_assign()
 * copies the corresponding members of @src into them.
 */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
19
/* Event class for tracepoints that record nothing but a btree position. */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
34
/*
 * Event class for tracepoints that log a preformatted string against a
 * filesystem: records the fs device number plus the caller-supplied string.
 */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str, str);
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
51
/*
 * Like fs_str, but for btree-transaction context: additionally records the
 * transaction's function name (truncated to 32 bytes) and the caller's ip.
 */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str, str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
74
/*
 * Event class for per-btree-node tracepoints: records the fs device, the
 * node's level and btree id, and the position from the node's key.
 */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
99
/* Event class for tracepoints that record only the filesystem's dev number. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
114
/*
 * Event class for bio tracepoints: device (0 if the bio has no bdev yet),
 * start sector, length in 512-byte sectors, and the blktrace-style rwbs
 * operation string.
 */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	/* NOTE(review): double space after "%d,%d" — trace formats are parsed by
	 * tools, so left as-is; confirm against upstream before changing. */
	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
137
138 /* super-io.c: */
/* A superblock write was issued; records the fs device and the caller's ip. */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
157
158 /* io.c: */
159
/* Read path: presumably an extent promotion triggered by a read — confirm. */
DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: promotion not performed; records the error code as a string. */
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

/* Read path: data bounced through a temporary buffer. */
DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: a read was split into multiple bios. */
DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: a read is being retried. */
DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: NOTE(review) — name suggests a race on data reuse; verify. */
DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
203
204 /* Journal */
205
/* The journal is full. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* The current journal entry is full. */
DEFINE_EVENT(bch_fs, journal_entry_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* A journal entry was closed; records its size in bytes. */
TRACE_EVENT(journal_entry_close,
	TP_PROTO(struct bch_fs *c, unsigned bytes),
	TP_ARGS(c, bytes),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		bytes			)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->bytes			= bytes;
	),

	TP_printk("%d,%d entry bytes %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->bytes)
);

/* A journal write bio was submitted. */
DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
239
/*
 * Journal reclaim is starting; snapshots the reclaim targets and the
 * dirty/total counts for the btree cache and btree key cache.
 * @direct/@kicked distinguish how reclaim was initiated.
 */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
284
/* Journal reclaim finished; records how many entries were flushed. */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

/* A bpos could not be packed into the btree node's packed key format. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
310
311 /* Btree cache: */
312
/* Btree cache shrinker ran; records scan target, freeable count, and result. */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

/* A btree node was reaped from the cache. */
DEFINE_EVENT(btree_node, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* Failed to take the cannibalize lock on the btree cache. */
DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Took the cannibalize lock on the btree cache. */
DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* A cached btree node was cannibalized to satisfy an allocation. */
DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Released the cannibalize lock on the btree cache. */
DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
357
358 /* Btree */
359
/* A btree node read was started. */
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);
364
365 TRACE_EVENT(btree_node_write,
366         TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
367         TP_ARGS(b, bytes, sectors),
368
369         TP_STRUCT__entry(
370                 __field(enum btree_node_type,   type)
371                 __field(unsigned,       bytes                   )
372                 __field(unsigned,       sectors                 )
373         ),
374
375         TP_fast_assign(
376                 __entry->type   = btree_node_type(b);
377                 __entry->bytes  = bytes;
378                 __entry->sectors = sectors;
379         ),
380
381         TP_printk("bkey type %u bytes %u sectors %u",
382                   __entry->type , __entry->bytes, __entry->sectors)
383 );
384
/* A btree node was allocated. */
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* A btree node was freed. */
DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);
394
/*
 * Failed to get a btree node reserve of @required nodes; records the
 * transaction, caller ip, and the error as a string.
 */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
422
/* A btree node was compacted. */
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* Btree nodes were merged. */
DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* A btree node was split. */
DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* A btree node was rewritten. */
DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* A btree node became the root of its btree. */
DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);
447
448 TRACE_EVENT(btree_path_relock_fail,
449         TP_PROTO(struct btree_trans *trans,
450                  unsigned long caller_ip,
451                  struct btree_path *path,
452                  unsigned level),
453         TP_ARGS(trans, caller_ip, path, level),
454
455         TP_STRUCT__entry(
456                 __array(char,                   trans_fn, 32    )
457                 __field(unsigned long,          caller_ip       )
458                 __field(u8,                     btree_id        )
459                 __field(u8,                     level           )
460                 TRACE_BPOS_entries(pos)
461                 __array(char,                   node, 24        )
462                 __field(u8,                     self_read_count )
463                 __field(u8,                     self_intent_count)
464                 __field(u8,                     read_count      )
465                 __field(u8,                     intent_count    )
466                 __field(u32,                    iter_lock_seq   )
467                 __field(u32,                    node_lock_seq   )
468         ),
469
470         TP_fast_assign(
471                 struct btree *b = btree_path_node(path, level);
472                 struct six_lock_count c;
473
474                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
475                 __entry->caller_ip              = caller_ip;
476                 __entry->btree_id               = path->btree_id;
477                 __entry->level                  = path->level;
478                 TRACE_BPOS_assign(pos, path->pos);
479
480                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
481                 __entry->self_read_count        = c.n[SIX_LOCK_read];
482                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
483
484                 if (IS_ERR(b)) {
485                         strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
486                 } else {
487                         c = six_lock_counts(&path->l[level].b->c.lock);
488                         __entry->read_count     = c.n[SIX_LOCK_read];
489                         __entry->intent_count   = c.n[SIX_LOCK_intent];
490                         scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
491                 }
492                 __entry->iter_lock_seq          = path->l[level].lock_seq;
493                 __entry->node_lock_seq          = is_btree_node(path, level)
494                         ? six_lock_seq(&path->l[level].b->c.lock)
495                         : 0;
496         ),
497
498         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
499                   __entry->trans_fn,
500                   (void *) __entry->caller_ip,
501                   bch2_btree_id_str(__entry->btree_id),
502                   __entry->pos_inode,
503                   __entry->pos_offset,
504                   __entry->pos_snapshot,
505                   __entry->level,
506                   __entry->node,
507                   __entry->self_read_count,
508                   __entry->self_intent_count,
509                   __entry->read_count,
510                   __entry->intent_count,
511                   __entry->iter_lock_seq,
512                   __entry->node_lock_seq)
513 );
514
515 TRACE_EVENT(btree_path_upgrade_fail,
516         TP_PROTO(struct btree_trans *trans,
517                  unsigned long caller_ip,
518                  struct btree_path *path,
519                  unsigned level),
520         TP_ARGS(trans, caller_ip, path, level),
521
522         TP_STRUCT__entry(
523                 __array(char,                   trans_fn, 32    )
524                 __field(unsigned long,          caller_ip       )
525                 __field(u8,                     btree_id        )
526                 __field(u8,                     level           )
527                 TRACE_BPOS_entries(pos)
528                 __field(u8,                     locked          )
529                 __field(u8,                     self_read_count )
530                 __field(u8,                     self_intent_count)
531                 __field(u8,                     read_count      )
532                 __field(u8,                     intent_count    )
533                 __field(u32,                    iter_lock_seq   )
534                 __field(u32,                    node_lock_seq   )
535         ),
536
537         TP_fast_assign(
538                 struct six_lock_count c;
539
540                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
541                 __entry->caller_ip              = caller_ip;
542                 __entry->btree_id               = path->btree_id;
543                 __entry->level                  = level;
544                 TRACE_BPOS_assign(pos, path->pos);
545                 __entry->locked                 = btree_node_locked(path, level);
546
547                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
548                 __entry->self_read_count        = c.n[SIX_LOCK_read];
549                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
550                 c = six_lock_counts(&path->l[level].b->c.lock);
551                 __entry->read_count             = c.n[SIX_LOCK_read];
552                 __entry->intent_count           = c.n[SIX_LOCK_intent];
553                 __entry->iter_lock_seq          = path->l[level].lock_seq;
554                 __entry->node_lock_seq          = is_btree_node(path, level)
555                         ? six_lock_seq(&path->l[level].b->c.lock)
556                         : 0;
557         ),
558
559         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
560                   __entry->trans_fn,
561                   (void *) __entry->caller_ip,
562                   bch2_btree_id_str(__entry->btree_id),
563                   __entry->pos_inode,
564                   __entry->pos_offset,
565                   __entry->pos_snapshot,
566                   __entry->level,
567                   __entry->locked,
568                   __entry->self_read_count,
569                   __entry->self_intent_count,
570                   __entry->read_count,
571                   __entry->intent_count,
572                   __entry->iter_lock_seq,
573                   __entry->node_lock_seq)
574 );
575
576 /* Garbage collection */
577
/* Bucket generation garbage collection started. */
DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Bucket generation garbage collection finished. */
DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
587
588 /* Allocator */
589
/*
 * Event class for bucket allocation attempts: records the device index, the
 * reserve name, the chosen bucket, free/available counts, copygc wait state,
 * the skip counters from the allocation pass (@s), and the result (@err).
 */
DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err),

	TP_STRUCT__entry(
		__field(u8,			dev			)
		__array(char,	reserve,	16			)
		__field(u64,			bucket	)
		__field(u64,			free			)
		__field(u64,			avail			)
		__field(u64,			copygc_wait_amount	)
		__field(s64,			copygc_waiting_for	)
		__field(u64,			seen			)
		__field(u64,			open			)
		__field(u64,			need_journal_commit	)
		__field(u64,			nouse			)
		__field(bool,			nonblocking		)
		__field(u64,			nocow			)
		__array(char,			err,	32		)
	),

	TP_fast_assign(
		__entry->dev		= ca->dev_idx;
		strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
		__entry->bucket		= bucket;
		__entry->free		= free;
		__entry->avail		= avail;
		__entry->copygc_wait_amount	= copygc_wait_amount;
		__entry->copygc_waiting_for	= copygc_waiting_for;
		__entry->seen		= s->buckets_seen;
		__entry->open		= s->skipped_open;
		__entry->need_journal_commit = s->skipped_need_journal_commit;
		__entry->nouse		= s->skipped_nouse;
		__entry->nonblocking	= nonblocking;
		__entry->nocow		= s->skipped_nocow;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
		  __entry->reserve,
		  __entry->dev,
		  __entry->bucket,
		  __entry->free,
		  __entry->avail,
		  __entry->copygc_wait_amount,
		  __entry->copygc_waiting_for,
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->nouse,
		  __entry->nocow,
		  __entry->nonblocking,
		  __entry->err)
);
654
/* A bucket allocation succeeded. */
DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

/* A bucket allocation failed; @err carries the reason. */
DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);
684
685 TRACE_EVENT(discard_buckets,
686         TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
687                  u64 need_journal_commit, u64 discarded, const char *err),
688         TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
689
690         TP_STRUCT__entry(
691                 __field(dev_t,          dev                     )
692                 __field(u64,            seen                    )
693                 __field(u64,            open                    )
694                 __field(u64,            need_journal_commit     )
695                 __field(u64,            discarded               )
696                 __array(char,           err,    16              )
697         ),
698
699         TP_fast_assign(
700                 __entry->dev                    = c->dev;
701                 __entry->seen                   = seen;
702                 __entry->open                   = open;
703                 __entry->need_journal_commit    = need_journal_commit;
704                 __entry->discarded              = discarded;
705                 strscpy(__entry->err, err, sizeof(__entry->err));
706         ),
707
708         TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
709                   MAJOR(__entry->dev), MINOR(__entry->dev),
710                   __entry->seen,
711                   __entry->open,
712                   __entry->need_journal_commit,
713                   __entry->discarded,
714                   __entry->err)
715 );
716
/*
 * A bucket was invalidated; records the fs device, the member device index,
 * the bucket number, and the number of cached sectors that were dropped.
 */
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
740
741 /* Moving IO */
742
/*
 * A bucket is being evacuated; @bucket's inode field is the member device
 * index and its offset field is the bucket number.
 */
TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);
763
/* Move path: an extent move was initiated; @k is the formatted key. */
DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

/* Move path: the read side of an extent move was issued. */
DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

/* Move path: the write side of an extent move was issued. */
DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

/* Move path: an extent move completed. */
DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

/*
 * Move path: an extent move failed; @msg describes why.
 * NOTE(review): duplicates the fs_str class but prints "%d:%d" rather than
 * "%d,%d" — left as-is since trace formats are parsed by tools; confirm
 * whether it can be converted to DEFINE_EVENT(fs_str, ...).
 */
TRACE_EVENT(move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *msg),
	TP_ARGS(c, msg),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(msg,		msg			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(msg, msg);
	),

	TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
);

/* Move path: an extent move could not be started. */
DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
805
806 TRACE_EVENT(move_data,
807         TP_PROTO(struct bch_fs *c,
808                  struct bch_move_stats *stats),
809         TP_ARGS(c, stats),
810
811         TP_STRUCT__entry(
812                 __field(dev_t,          dev             )
813                 __field(u64,            keys_moved      )
814                 __field(u64,            keys_raced      )
815                 __field(u64,            sectors_seen    )
816                 __field(u64,            sectors_moved   )
817                 __field(u64,            sectors_raced   )
818         ),
819
820         TP_fast_assign(
821                 __entry->dev            = c->dev;
822                 __entry->keys_moved     = atomic64_read(&stats->keys_moved);
823                 __entry->keys_raced     = atomic64_read(&stats->keys_raced);
824                 __entry->sectors_seen   = atomic64_read(&stats->sectors_seen);
825                 __entry->sectors_moved  = atomic64_read(&stats->sectors_moved);
826                 __entry->sectors_raced  = atomic64_read(&stats->sectors_raced);
827         ),
828
829         TP_printk("%d,%d keys moved %llu raced %llu"
830                   "sectors seen %llu moved %llu raced %llu",
831                   MAJOR(__entry->dev), MINOR(__entry->dev),
832                   __entry->keys_moved,
833                   __entry->keys_raced,
834                   __entry->sectors_seen,
835                   __entry->sectors_moved,
836                   __entry->sectors_raced)
837 );
838
/*
 * evacuate_bucket - one bucket was evacuated (or the attempt returned ret).
 *
 * bucket is a bpos whose .inode is the member-device index and .offset the
 * bucket number within that device (same encoding as the bucket bpos used
 * by the event earlier in this file).  sectors/bucket_size give how full the
 * bucket was; ret is the outcome (0 or an error code — see callers).
 */
TRACE_EVENT(evacuate_bucket,
        TP_PROTO(struct bch_fs *c, struct bpos *bucket,
                 unsigned sectors, unsigned bucket_size,
                 u64 fragmentation, int ret),
        TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

        TP_STRUCT__entry(
                __field(dev_t,          dev             )
                __field(u64,            member          )
                __field(u64,            bucket          )
                __field(u32,            sectors         )
                __field(u32,            bucket_size     )
                __field(u64,            fragmentation   )
                __field(int,            ret             )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->member                 = bucket->inode;
                __entry->bucket                 = bucket->offset;
                __entry->sectors                = sectors;
                __entry->bucket_size            = bucket_size;
                __entry->fragmentation          = fragmentation;
                __entry->ret                    = ret;
        ),

        TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->member, __entry->bucket,
                  __entry->sectors, __entry->bucket_size,
                  __entry->fragmentation, __entry->ret)
);
871
872 TRACE_EVENT(copygc,
873         TP_PROTO(struct bch_fs *c,
874                  u64 sectors_moved, u64 sectors_not_moved,
875                  u64 buckets_moved, u64 buckets_not_moved),
876         TP_ARGS(c,
877                 sectors_moved, sectors_not_moved,
878                 buckets_moved, buckets_not_moved),
879
880         TP_STRUCT__entry(
881                 __field(dev_t,          dev                     )
882                 __field(u64,            sectors_moved           )
883                 __field(u64,            sectors_not_moved       )
884                 __field(u64,            buckets_moved           )
885                 __field(u64,            buckets_not_moved       )
886         ),
887
888         TP_fast_assign(
889                 __entry->dev                    = c->dev;
890                 __entry->sectors_moved          = sectors_moved;
891                 __entry->sectors_not_moved      = sectors_not_moved;
892                 __entry->buckets_moved          = buckets_moved;
893                 __entry->buckets_not_moved = buckets_moved;
894         ),
895
896         TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
897                   MAJOR(__entry->dev), MINOR(__entry->dev),
898                   __entry->sectors_moved, __entry->sectors_not_moved,
899                   __entry->buckets_moved, __entry->buckets_not_moved)
900 );
901
902 TRACE_EVENT(copygc_wait,
903         TP_PROTO(struct bch_fs *c,
904                  u64 wait_amount, u64 until),
905         TP_ARGS(c, wait_amount, until),
906
907         TP_STRUCT__entry(
908                 __field(dev_t,          dev                     )
909                 __field(u64,            wait_amount             )
910                 __field(u64,            until                   )
911         ),
912
913         TP_fast_assign(
914                 __entry->dev            = c->dev;
915                 __entry->wait_amount    = wait_amount;
916                 __entry->until          = until;
917         ),
918
919         TP_printk("%d,%u waiting for %llu sectors until %llu",
920                   MAJOR(__entry->dev), MINOR(__entry->dev),
921                   __entry->wait_amount, __entry->until)
922 );
923
924 /* btree transactions: */
925
/*
 * transaction_event - base class for btree-transaction events that only need
 * the transaction's function name (truncated to 32 bytes) and the caller's
 * instruction pointer.
 */
DECLARE_EVENT_CLASS(transaction_event,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
        ),

        TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
943
/* A transaction committed. */
DEFINE_EVENT(transaction_event, transaction_commit,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

/* Transaction restart triggered by fault injection. */
DEFINE_EVENT(transaction_event, trans_restart_injected,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);
955
/*
 * trans_restart_split_race - transaction restarted after racing with a node
 * split; records the node's level, how much of it is written (in blocks),
 * and the u64s of key space remaining in it.
 */
TRACE_EVENT(trans_restart_split_race,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree *b),
        TP_ARGS(trans, caller_ip, b),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     level           )
                __field(u16,                    written         )
                __field(u16,                    blocks          )
                __field(u16,                    u64s_remaining  )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->level          = b->c.level;
                __entry->written        = b->written;
                __entry->blocks         = btree_blocks(trans->c);
                __entry->u64s_remaining = bch_btree_keys_u64s_remaining(trans->c, b);
        ),

        TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
                  __entry->trans_fn, (void *) __entry->caller_ip,
                  __entry->level,
                  __entry->written, __entry->blocks,
                  __entry->u64s_remaining)
);
986
/* Transaction blocked waiting on journal reclaim. */
DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

/*
 * trans_restart_journal_preres_get - restart while getting a journal
 * pre-reservation; flags are the reservation flags passed by the caller
 * (printed in hex).
 */
TRACE_EVENT(trans_restart_journal_preres_get,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 unsigned flags),
        TP_ARGS(trans, caller_ip, flags),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(unsigned,               flags           )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->flags                  = flags;
        ),

        TP_printk("%s %pS %x", __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  __entry->flags)
);
1015
/* Restart from an injected fault. */
DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

/* Full traversal of all paths in the transaction. */
DEFINE_EVENT(transaction_event, trans_traverse_all,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

/* Restart after racing on a key cache entry. */
DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

/* Restart: iterator limit hit; paths is a preformatted dump of the paths. */
DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 const char *paths),
        TP_ARGS(trans, caller_ip, paths)
);
1040
/*
 * transaction_restart_iter - base class for restarts tied to a specific
 * btree_path: records trans fn, caller IP, the path's btree id and its
 * position (inode:offset:snapshot).
 */
DECLARE_EVENT_CLASS(transaction_restart_iter,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     btree_id        )
                TRACE_BPOS_entries(pos)
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                TRACE_BPOS_assign(pos, path->pos)
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot)
);
1069
/* Restart: the btree node this path pointed at was reused. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_reused,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: the btree node was split out from under us. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_split,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Opaque failure info from the locking code; defined in btree locking code. */
struct get_locks_fail;
1085
/*
 * trans_restart_upgrade - restart while upgrading a path's lock level;
 * records the old/new locks_want, the level that failed (f->l), the path's
 * and node's lock sequence numbers at that level, and the path's alloc/
 * downgrade sequence numbers.  f->b may be an error pointer or NULL, in
 * which case node_seq is reported as 0.
 */
TRACE_EVENT(trans_restart_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned old_locks_want,
                 unsigned new_locks_want,
                 struct get_locks_fail *f),
        TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     btree_id        )
                __field(u8,                     old_locks_want  )
                __field(u8,                     new_locks_want  )
                __field(u8,                     level           )
                __field(u32,                    path_seq        )
                __field(u32,                    node_seq        )
                __field(u32,                    path_alloc_seq  )
                __field(u32,                    downgrade_seq)
                TRACE_BPOS_entries(pos)
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->old_locks_want         = old_locks_want;
                __entry->new_locks_want         = new_locks_want;
                __entry->level                  = f->l;
                __entry->path_seq               = path->l[f->l].lock_seq;
                __entry->node_seq               = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
                __entry->path_alloc_seq         = path->alloc_seq;
                __entry->downgrade_seq          = path->downgrade_seq;
                TRACE_BPOS_assign(pos, path->pos)
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->old_locks_want,
                  __entry->new_locks_want,
                  __entry->level,
                  __entry->path_seq,
                  __entry->node_seq,
                  __entry->path_alloc_seq,
                  __entry->downgrade_seq)
);
1138
/* Restart: relock of a path failed. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: relock failed while advancing to the next node. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_next_node,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: relock of the parent failed while filling a node. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_parent_for_fill,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: relock failed after a node fill. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_after_fill,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: key cache lock upgrade failed. */
DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

/* Restart: relock failed while filling the key cache. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_key_cache_fill,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: relock of a specific path failed. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: intent-lock relock of a path failed. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path_intent,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart during path traversal. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_traverse,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: memory allocation failed mid-transaction. */
DEFINE_EVENT(transaction_restart_iter,  trans_restart_memory_allocation_failure,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

/* Restart: lock cycle detected; cycle is a preformatted description. */
DEFINE_EVENT(trans_str, trans_restart_would_deadlock,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 const char *cycle),
        TP_ARGS(trans, caller_ip, cycle)
);

/* Restart: deadlock-detection recursion limit reached. */
DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);
1220
/*
 * trans_restart_would_deadlock_write - would-deadlock detected on the write
 * path; only the transaction's function name is recorded (no caller IP).
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
        TP_PROTO(struct btree_trans *trans),
        TP_ARGS(trans),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
        ),

        TP_printk("%s", __entry->trans_fn)
);
1235
/*
 * trans_restart_mem_realloced - restart because the transaction's preallocated
 * memory was reallocated; bytes is the new allocation size.
 */
TRACE_EVENT(trans_restart_mem_realloced,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 unsigned long bytes),
        TP_ARGS(trans, caller_ip, bytes),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(unsigned long,          bytes           )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->bytes          = bytes;
        ),

        TP_printk("%s %pS bytes %lu",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  __entry->bytes)
);
1259
/*
 * trans_restart_key_cache_key_realloced - restart because a cached key was
 * reallocated; records the path's btree/pos and the old/new key sizes in
 * u64s.
 *
 * NOTE(review): btree_id is stored as enum btree_id here, while sibling
 * events use u8 — presumably equivalent on the wire, but worth confirming.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned old_u64s,
                 unsigned new_u64s),
        TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(enum btree_id,          btree_id        )
                TRACE_BPOS_entries(pos)
                __field(u32,                    old_u64s        )
                __field(u32,                    new_u64s        )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;

                __entry->btree_id       = path->btree_id;
                TRACE_BPOS_assign(pos, path->pos);
                __entry->old_u64s       = old_u64s;
                __entry->new_u64s       = new_u64s;
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->old_u64s,
                  __entry->new_u64s)
);
1297
/*
 * path_downgrade - a path's lock level was downgraded; old_locks_want is
 * passed in, new_locks_want is read back from the path after the downgrade.
 */
TRACE_EVENT(path_downgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned old_locks_want),
        TP_ARGS(trans, caller_ip, path, old_locks_want),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(unsigned,               old_locks_want  )
                __field(unsigned,               new_locks_want  )
                __field(unsigned,               btree           )
                TRACE_BPOS_entries(pos)
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->old_locks_want         = old_locks_want;
                __entry->new_locks_want         = path->locks_want;
                __entry->btree                  = path->btree_id;
                TRACE_BPOS_assign(pos, path->pos);
        ),

        TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  __entry->old_locks_want,
                  __entry->new_locks_want,
                  bch2_btree_id_str(__entry->btree),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot)
);
1333
/* Restart caused by a write buffer flush. */
DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);
1339
/*
 * write_buffer_flush - stats for one write buffer flush: nr entries flushed
 * out of size total, how many were skipped and how many took the fast path.
 * The trans argument is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush,
        TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
        TP_ARGS(trans, nr, skipped, fast, size),

        TP_STRUCT__entry(
                __field(size_t,         nr              )
                __field(size_t,         skipped         )
                __field(size_t,         fast            )
                __field(size_t,         size            )
        ),

        TP_fast_assign(
                __entry->nr     = nr;
                __entry->skipped = skipped;
                __entry->fast   = fast;
                __entry->size   = size;
        ),

        TP_printk("%zu/%zu skipped %zu fast %zu",
                  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1361
/* write_buffer_flush_sync - a synchronous write buffer flush was requested. */
TRACE_EVENT(write_buffer_flush_sync,
        TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
        TP_ARGS(trans, caller_ip),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
        ),

        TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1378
/*
 * write_buffer_flush_slowpath - slowpath entries out of total during a write
 * buffer flush.  The trans argument is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
        TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
        TP_ARGS(trans, slowpath, total),

        TP_STRUCT__entry(
                __field(size_t,         slowpath        )
                __field(size_t,         total           )
        ),

        TP_fast_assign(
                __entry->slowpath       = slowpath;
                __entry->total          = total;
        ),

        TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
1395
/* An extent processed by rebalance; str is preformatted by the caller. */
DEFINE_EVENT(fs_str, rebalance_extent,
        TP_PROTO(struct bch_fs *c, const char *str),
        TP_ARGS(c, str)
);

/* A data update; str is preformatted by the caller. */
DEFINE_EVENT(fs_str, data_update,
        TP_PROTO(struct bch_fs *c, const char *str),
        TP_ARGS(c, str)
);
1405
1406 #endif /* _TRACE_BCACHEFS_H */
1407
1408 /* This part must be outside protection */
1409 #undef TRACE_INCLUDE_PATH
1410 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1411
1412 #undef TRACE_INCLUDE_FILE
1413 #define TRACE_INCLUDE_FILE trace
1414
1415 #include <trace/define_trace.h>