1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7
8 #include <linux/tracepoint.h>
9
10 #define TRACE_BPOS_entries(name)                                \
11         __field(u64,                    name##_inode    )       \
12         __field(u64,                    name##_offset   )       \
13         __field(u32,                    name##_snapshot )
14
15 #define TRACE_BPOS_assign(dst, src)                             \
16         __entry->dst##_inode            = (src).inode;          \
17         __entry->dst##_offset           = (src).offset;         \
18         __entry->dst##_snapshot         = (src).snapshot
19
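/*
 * The two helpers above record a struct bpos in a trace entry as three
 * fields named <prefix>_inode, <prefix>_offset and <prefix>_snapshot; the
 * event classes below use them whenever a btree position is logged.
 */
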
20 DECLARE_EVENT_CLASS(bpos,
21         TP_PROTO(const struct bpos *p),
22         TP_ARGS(p),
23
24         TP_STRUCT__entry(
25                 TRACE_BPOS_entries(p)
26         ),
27
28         TP_fast_assign(
29                 TRACE_BPOS_assign(p, *p);
30         ),
31
32         TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
33 );
34
35 DECLARE_EVENT_CLASS(bkey,
36         TP_PROTO(struct bch_fs *c, const char *k),
37         TP_ARGS(c, k),
38
39         TP_STRUCT__entry(
40                 __string(k,     k                               )
41         ),
42
43         TP_fast_assign(
44                 __assign_str(k, k);
45         ),
46
47         TP_printk("%s", __get_str(k))
48 );
49
50 DECLARE_EVENT_CLASS(btree_node,
51         TP_PROTO(struct bch_fs *c, struct btree *b),
52         TP_ARGS(c, b),
53
54         TP_STRUCT__entry(
55                 __field(dev_t,          dev                     )
56                 __field(u8,             level                   )
57                 __field(u8,             btree_id                )
58                 TRACE_BPOS_entries(pos)
59         ),
60
61         TP_fast_assign(
62                 __entry->dev            = c->dev;
63                 __entry->level          = b->c.level;
64                 __entry->btree_id       = b->c.btree_id;
65                 TRACE_BPOS_assign(pos, b->key.k.p);
66         ),
67
68         TP_printk("%d,%d %u %s %llu:%llu:%u",
69                   MAJOR(__entry->dev), MINOR(__entry->dev),
70                   __entry->level,
71                   bch2_btree_ids[__entry->btree_id],
72                   __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
73 );
74
75 DECLARE_EVENT_CLASS(bch_fs,
76         TP_PROTO(struct bch_fs *c),
77         TP_ARGS(c),
78
79         TP_STRUCT__entry(
80                 __field(dev_t,          dev                     )
81         ),
82
83         TP_fast_assign(
84                 __entry->dev            = c->dev;
85         ),
86
87         TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
88 );
89
90 DECLARE_EVENT_CLASS(bio,
91         TP_PROTO(struct bio *bio),
92         TP_ARGS(bio),
93
94         TP_STRUCT__entry(
95                 __field(dev_t,          dev                     )
96                 __field(sector_t,       sector                  )
97                 __field(unsigned int,   nr_sector               )
98                 __array(char,           rwbs,   6               )
99         ),
100
101         TP_fast_assign(
102                 __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
103                 __entry->sector         = bio->bi_iter.bi_sector;
104                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
105                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
106         ),
107
108         TP_printk("%d,%d  %s %llu + %u",
109                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
110                   (unsigned long long)__entry->sector, __entry->nr_sector)
111 );
112
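/*
 * The event classes above (bpos, bkey, btree_node, bch_fs, bio) define the
 * entry layout and print format once; each DEFINE_EVENT() below instantiates
 * a tracepoint of the matching class.  As an illustrative sketch of the
 * standard tracepoint convention (not specific to this file):
 *
 *	DEFINE_EVENT(bio, journal_write, ...)
 *
 * generates trace_journal_write(bio) for emitting the event and
 * trace_journal_write_enabled() for cheap is-anyone-listening checks.
 */
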
113 /* super-io.c: */
114 TRACE_EVENT(write_super,
115         TP_PROTO(struct bch_fs *c, unsigned long ip),
116         TP_ARGS(c, ip),
117
118         TP_STRUCT__entry(
119                 __field(dev_t,          dev     )
120                 __field(unsigned long,  ip      )
121         ),
122
123         TP_fast_assign(
124                 __entry->dev            = c->dev;
125                 __entry->ip             = ip;
126         ),
127
128         TP_printk("%d,%d for %pS",
129                   MAJOR(__entry->dev), MINOR(__entry->dev),
130                   (void *) __entry->ip)
131 );
132
133 /* io.c: */
134
135 DEFINE_EVENT(bio, read_promote,
136         TP_PROTO(struct bio *bio),
137         TP_ARGS(bio)
138 );
139
140 DEFINE_EVENT(bio, read_bounce,
141         TP_PROTO(struct bio *bio),
142         TP_ARGS(bio)
143 );
144
145 DEFINE_EVENT(bio, read_split,
146         TP_PROTO(struct bio *bio),
147         TP_ARGS(bio)
148 );
149
150 DEFINE_EVENT(bio, read_retry,
151         TP_PROTO(struct bio *bio),
152         TP_ARGS(bio)
153 );
154
155 DEFINE_EVENT(bio, read_reuse_race,
156         TP_PROTO(struct bio *bio),
157         TP_ARGS(bio)
158 );
159
160 /* Journal */
161
162 DEFINE_EVENT(bch_fs, journal_full,
163         TP_PROTO(struct bch_fs *c),
164         TP_ARGS(c)
165 );
166
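/*
 * Illustrative call site for the journal_full event above (a sketch, not
 * code from this repository): the journal code would emit it as
 *
 *	trace_journal_full(c);
 *
 * and the tracepoint's static-key check keeps the call essentially free
 * while the event is disabled.
 */
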
167 DEFINE_EVENT(bch_fs, journal_entry_full,
168         TP_PROTO(struct bch_fs *c),
169         TP_ARGS(c)
170 );
171
172 DEFINE_EVENT(bio, journal_write,
173         TP_PROTO(struct bio *bio),
174         TP_ARGS(bio)
175 );
176
177 TRACE_EVENT(journal_reclaim_start,
178         TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
179                  u64 min_nr, u64 min_key_cache,
180                  u64 prereserved, u64 prereserved_total,
181                  u64 btree_cache_dirty, u64 btree_cache_total,
182                  u64 btree_key_cache_dirty, u64 btree_key_cache_total),
183         TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
184                 btree_cache_dirty, btree_cache_total,
185                 btree_key_cache_dirty, btree_key_cache_total),
186
187         TP_STRUCT__entry(
188                 __field(dev_t,          dev                     )
189                 __field(bool,           direct                  )
190                 __field(bool,           kicked                  )
191                 __field(u64,            min_nr                  )
192                 __field(u64,            min_key_cache           )
193                 __field(u64,            prereserved             )
194                 __field(u64,            prereserved_total       )
195                 __field(u64,            btree_cache_dirty       )
196                 __field(u64,            btree_cache_total       )
197                 __field(u64,            btree_key_cache_dirty   )
198                 __field(u64,            btree_key_cache_total   )
199         ),
200
201         TP_fast_assign(
202                 __entry->dev                    = c->dev;
203                 __entry->direct                 = direct;
204                 __entry->kicked                 = kicked;
205                 __entry->min_nr                 = min_nr;
206                 __entry->min_key_cache          = min_key_cache;
207                 __entry->prereserved            = prereserved;
208                 __entry->prereserved_total      = prereserved_total;
209                 __entry->btree_cache_dirty      = btree_cache_dirty;
210                 __entry->btree_cache_total      = btree_cache_total;
211                 __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
212                 __entry->btree_key_cache_total  = btree_key_cache_total;
213         ),
214
215         TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
216                   MAJOR(__entry->dev), MINOR(__entry->dev),
217                   __entry->direct,
218                   __entry->kicked,
219                   __entry->min_nr,
220                   __entry->min_key_cache,
221                   __entry->prereserved,
222                   __entry->prereserved_total,
223                   __entry->btree_cache_dirty,
224                   __entry->btree_cache_total,
225                   __entry->btree_key_cache_dirty,
226                   __entry->btree_key_cache_total)
227 );
228
229 TRACE_EVENT(journal_reclaim_finish,
230         TP_PROTO(struct bch_fs *c, u64 nr_flushed),
231         TP_ARGS(c, nr_flushed),
232
233         TP_STRUCT__entry(
234                 __field(dev_t,          dev                     )
235                 __field(u64,            nr_flushed              )
236         ),
237
238         TP_fast_assign(
239                 __entry->dev            = c->dev;
240                 __entry->nr_flushed     = nr_flushed;
241         ),
242
243         TP_printk("%d,%d flushed %llu",
244                   MAJOR(__entry->dev), MINOR(__entry->dev),
245                   __entry->nr_flushed)
246 );
247
248 /* bset.c: */
249
250 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
251         TP_PROTO(const struct bpos *p),
252         TP_ARGS(p)
253 );
254
255 /* Btree cache: */
256
257 TRACE_EVENT(btree_cache_scan,
258         TP_PROTO(long nr_to_scan, long can_free, long ret),
259         TP_ARGS(nr_to_scan, can_free, ret),
260
261         TP_STRUCT__entry(
262                 __field(long,   nr_to_scan              )
263                 __field(long,   can_free                )
264                 __field(long,   ret                     )
265         ),
266
267         TP_fast_assign(
268                 __entry->nr_to_scan     = nr_to_scan;
269                 __entry->can_free       = can_free;
270                 __entry->ret            = ret;
271         ),
272
273         TP_printk("scanned for %li nodes, can free %li, ret %li",
274                   __entry->nr_to_scan, __entry->can_free, __entry->ret)
275 );
276
277 DEFINE_EVENT(btree_node, btree_cache_reap,
278         TP_PROTO(struct bch_fs *c, struct btree *b),
279         TP_ARGS(c, b)
280 );
281
282 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
283         TP_PROTO(struct bch_fs *c),
284         TP_ARGS(c)
285 );
286
287 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
288         TP_PROTO(struct bch_fs *c),
289         TP_ARGS(c)
290 );
291
292 DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
293         TP_PROTO(struct bch_fs *c),
294         TP_ARGS(c)
295 );
296
297 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
298         TP_PROTO(struct bch_fs *c),
299         TP_ARGS(c)
300 );
301
302 /* Btree */
303
304 DEFINE_EVENT(btree_node, btree_node_read,
305         TP_PROTO(struct bch_fs *c, struct btree *b),
306         TP_ARGS(c, b)
307 );
308
309 TRACE_EVENT(btree_node_write,
310         TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
311         TP_ARGS(b, bytes, sectors),
312
313         TP_STRUCT__entry(
314                 __field(enum btree_node_type,   type)
315                 __field(unsigned,       bytes                   )
316                 __field(unsigned,       sectors                 )
317         ),
318
319         TP_fast_assign(
320                 __entry->type   = btree_node_type(b);
321                 __entry->bytes  = bytes;
322                 __entry->sectors = sectors;
323         ),
324
325         TP_printk("bkey type %u bytes %u sectors %u",
326                   __entry->type, __entry->bytes, __entry->sectors)
327 );
328
329 DEFINE_EVENT(btree_node, btree_node_alloc,
330         TP_PROTO(struct bch_fs *c, struct btree *b),
331         TP_ARGS(c, b)
332 );
333
334 DEFINE_EVENT(btree_node, btree_node_free,
335         TP_PROTO(struct bch_fs *c, struct btree *b),
336         TP_ARGS(c, b)
337 );
338
339 TRACE_EVENT(btree_reserve_get_fail,
340         TP_PROTO(const char *trans_fn,
341                  unsigned long caller_ip,
342                  size_t required,
343                  int ret),
344         TP_ARGS(trans_fn, caller_ip, required, ret),
345
346         TP_STRUCT__entry(
347                 __array(char,                   trans_fn, 32    )
348                 __field(unsigned long,          caller_ip       )
349                 __field(size_t,                 required        )
350                 __array(char,                   ret, 32         )
351         ),
352
353         TP_fast_assign(
354                 strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
355                 __entry->caller_ip      = caller_ip;
356                 __entry->required       = required;
357                 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
358         ),
359
360         TP_printk("%s %pS required %zu ret %s",
361                   __entry->trans_fn,
362                   (void *) __entry->caller_ip,
363                   __entry->required,
364                   __entry->ret)
365 );
366
367 DEFINE_EVENT(btree_node, btree_node_compact,
368         TP_PROTO(struct bch_fs *c, struct btree *b),
369         TP_ARGS(c, b)
370 );
371
372 DEFINE_EVENT(btree_node, btree_node_merge,
373         TP_PROTO(struct bch_fs *c, struct btree *b),
374         TP_ARGS(c, b)
375 );
376
377 DEFINE_EVENT(btree_node, btree_node_split,
378         TP_PROTO(struct bch_fs *c, struct btree *b),
379         TP_ARGS(c, b)
380 );
381
382 DEFINE_EVENT(btree_node, btree_node_rewrite,
383         TP_PROTO(struct bch_fs *c, struct btree *b),
384         TP_ARGS(c, b)
385 );
386
387 DEFINE_EVENT(btree_node, btree_node_set_root,
388         TP_PROTO(struct bch_fs *c, struct btree *b),
389         TP_ARGS(c, b)
390 );
391
392 TRACE_EVENT(btree_path_relock_fail,
393         TP_PROTO(struct btree_trans *trans,
394                  unsigned long caller_ip,
395                  struct btree_path *path,
396                  unsigned level),
397         TP_ARGS(trans, caller_ip, path, level),
398
399         TP_STRUCT__entry(
400                 __array(char,                   trans_fn, 32    )
401                 __field(unsigned long,          caller_ip       )
402                 __field(u8,                     btree_id        )
403                 __field(u8,                     level           )
404                 TRACE_BPOS_entries(pos)
405                 __array(char,                   node, 24        )
406                 __field(u32,                    iter_lock_seq   )
407                 __field(u32,                    node_lock_seq   )
408         ),
409
410         TP_fast_assign(
411                 struct btree *b = btree_path_node(path, level);
412
413                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
414                 __entry->caller_ip              = caller_ip;
415                 __entry->btree_id               = path->btree_id;
416                 __entry->level                  = path->level;
417                 TRACE_BPOS_assign(pos, path->pos);
418                 if (IS_ERR(b))
419                         strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
420                 else
421                         scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
422                 __entry->iter_lock_seq          = path->l[level].lock_seq;
423                 __entry->node_lock_seq          = is_btree_node(path, level)
424                         ? six_lock_seq(&path->l[level].b->c.lock)
425                         : 0;
426         ),
427
428         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
429                   __entry->trans_fn,
430                   (void *) __entry->caller_ip,
431                   bch2_btree_ids[__entry->btree_id],
432                   __entry->pos_inode,
433                   __entry->pos_offset,
434                   __entry->pos_snapshot,
435                   __entry->level,
436                   __entry->node,
437                   __entry->iter_lock_seq,
438                   __entry->node_lock_seq)
439 );
440
441 TRACE_EVENT(btree_path_upgrade_fail,
442         TP_PROTO(struct btree_trans *trans,
443                  unsigned long caller_ip,
444                  struct btree_path *path,
445                  unsigned level),
446         TP_ARGS(trans, caller_ip, path, level),
447
448         TP_STRUCT__entry(
449                 __array(char,                   trans_fn, 32    )
450                 __field(unsigned long,          caller_ip       )
451                 __field(u8,                     btree_id        )
452                 __field(u8,                     level           )
453                 TRACE_BPOS_entries(pos)
454                 __field(u8,                     locked          )
455                 __field(u8,                     self_read_count )
456                 __field(u8,                     self_intent_count)
457                 __field(u8,                     read_count      )
458                 __field(u8,                     intent_count    )
459                 __field(u32,                    iter_lock_seq   )
460                 __field(u32,                    node_lock_seq   )
461         ),
462
463         TP_fast_assign(
464                 struct six_lock_count c;
465
466                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
467                 __entry->caller_ip              = caller_ip;
468                 __entry->btree_id               = path->btree_id;
469                 __entry->level                  = level;
470                 TRACE_BPOS_assign(pos, path->pos);
471                 __entry->locked                 = btree_node_locked(path, level);
472
473                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
474                 __entry->self_read_count        = c.n[SIX_LOCK_read];
475                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
476                 c = six_lock_counts(&path->l[level].b->c.lock);
477                 __entry->read_count             = c.n[SIX_LOCK_read];
478                 __entry->intent_count           = c.n[SIX_LOCK_intent];
479                 __entry->iter_lock_seq          = path->l[level].lock_seq;
480                 __entry->node_lock_seq          = is_btree_node(path, level)
481                         ? six_lock_seq(&path->l[level].b->c.lock)
482                         : 0;
483         ),
484
485         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
486                   __entry->trans_fn,
487                   (void *) __entry->caller_ip,
488                   bch2_btree_ids[__entry->btree_id],
489                   __entry->pos_inode,
490                   __entry->pos_offset,
491                   __entry->pos_snapshot,
492                   __entry->level,
493                   __entry->locked,
494                   __entry->self_read_count,
495                   __entry->self_intent_count,
496                   __entry->read_count,
497                   __entry->intent_count,
498                   __entry->iter_lock_seq,
499                   __entry->node_lock_seq)
500 );
501
502 /* Garbage collection */
503
504 DEFINE_EVENT(bch_fs, gc_gens_start,
505         TP_PROTO(struct bch_fs *c),
506         TP_ARGS(c)
507 );
508
509 DEFINE_EVENT(bch_fs, gc_gens_end,
510         TP_PROTO(struct bch_fs *c),
511         TP_ARGS(c)
512 );
513
514 /* Allocator */
515
516 DECLARE_EVENT_CLASS(bucket_alloc,
517         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
518                  u64 bucket,
519                  u64 free,
520                  u64 avail,
521                  u64 copygc_wait_amount,
522                  s64 copygc_waiting_for,
523                  struct bucket_alloc_state *s,
524                  bool nonblocking,
525                  const char *err),
526         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
527                 copygc_wait_amount, copygc_waiting_for,
528                 s, nonblocking, err),
529
530         TP_STRUCT__entry(
531                 __field(u8,                     dev                     )
532                 __array(char,   reserve,        16                      )
533                 __field(u64,                    bucket                  )
534                 __field(u64,                    free                    )
535                 __field(u64,                    avail                   )
536                 __field(u64,                    copygc_wait_amount      )
537                 __field(s64,                    copygc_waiting_for      )
538                 __field(u64,                    seen                    )
539                 __field(u64,                    open                    )
540                 __field(u64,                    need_journal_commit     )
541                 __field(u64,                    nouse                   )
542                 __field(bool,                   nonblocking             )
543                 __field(u64,                    nocow                   )
544                 __array(char,                   err,    32              )
545         ),
546
547         TP_fast_assign(
548                 __entry->dev            = ca->dev_idx;
549                 strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
550                 __entry->bucket         = bucket;
551                 __entry->free           = free;
552                 __entry->avail          = avail;
553                 __entry->copygc_wait_amount     = copygc_wait_amount;
554                 __entry->copygc_waiting_for     = copygc_waiting_for;
555                 __entry->seen           = s->buckets_seen;
556                 __entry->open           = s->skipped_open;
557                 __entry->need_journal_commit = s->skipped_need_journal_commit;
558                 __entry->nouse          = s->skipped_nouse;
559                 __entry->nonblocking    = nonblocking;
560                 __entry->nocow          = s->skipped_nocow;
561                 strscpy(__entry->err, err, sizeof(__entry->err));
562         ),
563
564         TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
565                   __entry->reserve,
566                   __entry->dev,
567                   __entry->bucket,
568                   __entry->free,
569                   __entry->avail,
570                   __entry->copygc_wait_amount,
571                   __entry->copygc_waiting_for,
572                   __entry->seen,
573                   __entry->open,
574                   __entry->need_journal_commit,
575                   __entry->nouse,
576                   __entry->nocow,
577                   __entry->nonblocking,
578                   __entry->err)
579 );
580
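/*
 * Judging by the bucket_alloc_state field names, the seen/open/
 * need_journal_commit/nouse/nocow counters copied above describe how many
 * candidate buckets were examined and why some were skipped during an
 * allocation attempt; the two DEFINE_EVENTs below reuse this class for the
 * success and failure paths.
 */
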
581 DEFINE_EVENT(bucket_alloc, bucket_alloc,
582         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
583                  u64 bucket,
584                  u64 free,
585                  u64 avail,
586                  u64 copygc_wait_amount,
587                  s64 copygc_waiting_for,
588                  struct bucket_alloc_state *s,
589                  bool nonblocking,
590                  const char *err),
591         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
592                 copygc_wait_amount, copygc_waiting_for,
593                 s, nonblocking, err)
594 );
595
596 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
597         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
598                  u64 bucket,
599                  u64 free,
600                  u64 avail,
601                  u64 copygc_wait_amount,
602                  s64 copygc_waiting_for,
603                  struct bucket_alloc_state *s,
604                  bool nonblocking,
605                  const char *err),
606         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
607                 copygc_wait_amount, copygc_waiting_for,
608                 s, nonblocking, err)
609 );
610
611 TRACE_EVENT(discard_buckets,
612         TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
613                  u64 need_journal_commit, u64 discarded, const char *err),
614         TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
615
616         TP_STRUCT__entry(
617                 __field(dev_t,          dev                     )
618                 __field(u64,            seen                    )
619                 __field(u64,            open                    )
620                 __field(u64,            need_journal_commit     )
621                 __field(u64,            discarded               )
622                 __array(char,           err,    16              )
623         ),
624
625         TP_fast_assign(
626                 __entry->dev                    = c->dev;
627                 __entry->seen                   = seen;
628                 __entry->open                   = open;
629                 __entry->need_journal_commit    = need_journal_commit;
630                 __entry->discarded              = discarded;
631                 strscpy(__entry->err, err, sizeof(__entry->err));
632         ),
633
634         TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
635                   MAJOR(__entry->dev), MINOR(__entry->dev),
636                   __entry->seen,
637                   __entry->open,
638                   __entry->need_journal_commit,
639                   __entry->discarded,
640                   __entry->err)
641 );
642
643 TRACE_EVENT(bucket_invalidate,
644         TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
645         TP_ARGS(c, dev, bucket, sectors),
646
647         TP_STRUCT__entry(
648                 __field(dev_t,          dev                     )
649                 __field(u32,            dev_idx                 )
650                 __field(u32,            sectors                 )
651                 __field(u64,            bucket                  )
652         ),
653
654         TP_fast_assign(
655                 __entry->dev            = c->dev;
656                 __entry->dev_idx        = dev;
657                 __entry->sectors        = sectors;
658                 __entry->bucket         = bucket;
659         ),
660
661         TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
662                   MAJOR(__entry->dev), MINOR(__entry->dev),
663                   __entry->dev_idx, __entry->bucket,
664                   __entry->sectors)
665 );
666
667 /* Moving IO */
668
669 TRACE_EVENT(bucket_evacuate,
670         TP_PROTO(struct bch_fs *c, struct bpos *bucket),
671         TP_ARGS(c, bucket),
672
673         TP_STRUCT__entry(
674                 __field(dev_t,          dev                     )
675                 __field(u32,            dev_idx                 )
676                 __field(u64,            bucket                  )
677         ),
678
679         TP_fast_assign(
680                 __entry->dev            = c->dev;
681                 __entry->dev_idx        = bucket->inode;
682                 __entry->bucket         = bucket->offset;
683         ),
684
685         TP_printk("%d:%d %u:%llu",
686                   MAJOR(__entry->dev), MINOR(__entry->dev),
687                   __entry->dev_idx, __entry->bucket)
688 );
689
690 DEFINE_EVENT(bkey, move_extent,
691         TP_PROTO(struct bch_fs *c, const char *k),
692         TP_ARGS(c, k)
693 );
694
695 DEFINE_EVENT(bkey, move_extent_read,
696         TP_PROTO(struct bch_fs *c, const char *k),
697         TP_ARGS(c, k)
698 );
699
700 DEFINE_EVENT(bkey, move_extent_write,
701         TP_PROTO(struct bch_fs *c, const char *k),
702         TP_ARGS(c, k)
703 );
704
705 DEFINE_EVENT(bkey, move_extent_finish,
706         TP_PROTO(struct bch_fs *c, const char *k),
707         TP_ARGS(c, k)
708 );
709
710 TRACE_EVENT(move_extent_fail,
711         TP_PROTO(struct bch_fs *c, const char *msg),
712         TP_ARGS(c, msg),
713
714         TP_STRUCT__entry(
715                 __field(dev_t,          dev                     )
716                 __string(msg,           msg                     )
717         ),
718
719         TP_fast_assign(
720                 __entry->dev            = c->dev;
721                 __assign_str(msg, msg);
722         ),
723
724         TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
725 );
726
727 DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
728         TP_PROTO(struct bch_fs *c, const char *k),
729         TP_ARGS(c, k)
730 );
731
732 TRACE_EVENT(move_data,
733         TP_PROTO(struct bch_fs *c, u64 sectors_moved,
734                  u64 keys_moved),
735         TP_ARGS(c, sectors_moved, keys_moved),
736
737         TP_STRUCT__entry(
738                 __field(dev_t,          dev                     )
739                 __field(u64,            sectors_moved   )
740                 __field(u64,            keys_moved      )
741         ),
742
743         TP_fast_assign(
744                 __entry->dev                    = c->dev;
745                 __entry->sectors_moved          = sectors_moved;
746                 __entry->keys_moved             = keys_moved;
747         ),
748
749         TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
750                   MAJOR(__entry->dev), MINOR(__entry->dev),
751                   __entry->sectors_moved, __entry->keys_moved)
752 );
753
754 TRACE_EVENT(evacuate_bucket,
755         TP_PROTO(struct bch_fs *c, struct bpos *bucket,
756                  unsigned sectors, unsigned bucket_size,
757                  u64 fragmentation, int ret),
758         TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),
759
760         TP_STRUCT__entry(
761                 __field(dev_t,          dev             )
762                 __field(u64,            member          )
763                 __field(u64,            bucket          )
764                 __field(u32,            sectors         )
765                 __field(u32,            bucket_size     )
766                 __field(u64,            fragmentation   )
767                 __field(int,            ret             )
768         ),
769
770         TP_fast_assign(
771                 __entry->dev                    = c->dev;
772                 __entry->member                 = bucket->inode;
773                 __entry->bucket                 = bucket->offset;
774                 __entry->sectors                = sectors;
775                 __entry->bucket_size            = bucket_size;
776                 __entry->fragmentation          = fragmentation;
777                 __entry->ret                    = ret;
778         ),
779
780         TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
781                   MAJOR(__entry->dev), MINOR(__entry->dev),
782                   __entry->member, __entry->bucket,
783                   __entry->sectors, __entry->bucket_size,
784                   __entry->fragmentation, __entry->ret)
785 );
786
787 TRACE_EVENT(copygc,
788         TP_PROTO(struct bch_fs *c,
789                  u64 sectors_moved, u64 sectors_not_moved,
790                  u64 buckets_moved, u64 buckets_not_moved),
791         TP_ARGS(c,
792                 sectors_moved, sectors_not_moved,
793                 buckets_moved, buckets_not_moved),
794
795         TP_STRUCT__entry(
796                 __field(dev_t,          dev                     )
797                 __field(u64,            sectors_moved           )
798                 __field(u64,            sectors_not_moved       )
799                 __field(u64,            buckets_moved           )
800                 __field(u64,            buckets_not_moved       )
801         ),
802
803         TP_fast_assign(
804                 __entry->dev                    = c->dev;
805                 __entry->sectors_moved          = sectors_moved;
806                 __entry->sectors_not_moved      = sectors_not_moved;
807                 __entry->buckets_moved          = buckets_moved;
808                 __entry->buckets_not_moved      = buckets_not_moved;
809         ),
810
811         TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
812                   MAJOR(__entry->dev), MINOR(__entry->dev),
813                   __entry->sectors_moved, __entry->sectors_not_moved,
814                   __entry->buckets_moved, __entry->buckets_not_moved)
815 );
816
817 TRACE_EVENT(copygc_wait,
818         TP_PROTO(struct bch_fs *c,
819                  u64 wait_amount, u64 until),
820         TP_ARGS(c, wait_amount, until),
821
822         TP_STRUCT__entry(
823                 __field(dev_t,          dev                     )
824                 __field(u64,            wait_amount             )
825                 __field(u64,            until                   )
826         ),
827
828         TP_fast_assign(
829                 __entry->dev            = c->dev;
830                 __entry->wait_amount    = wait_amount;
831                 __entry->until          = until;
832         ),
833
834         TP_printk("%d,%d waiting for %llu sectors until %llu",
835                   MAJOR(__entry->dev), MINOR(__entry->dev),
836                   __entry->wait_amount, __entry->until)
837 );
838
839 /* btree transactions: */
840
841 DECLARE_EVENT_CLASS(transaction_event,
842         TP_PROTO(struct btree_trans *trans,
843                  unsigned long caller_ip),
844         TP_ARGS(trans, caller_ip),
845
846         TP_STRUCT__entry(
847                 __array(char,                   trans_fn, 32    )
848                 __field(unsigned long,          caller_ip       )
849         ),
850
851         TP_fast_assign(
852                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
853                 __entry->caller_ip              = caller_ip;
854         ),
855
856         TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
857 );
858
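/*
 * Many of the trans_restart_* events below reuse this minimal class; others
 * carry extra context via their own classes or TRACE_EVENTs.  Because
 * TRACE_SYSTEM is "bcachefs", they can be enabled from userspace through
 * tracefs, e.g. (illustrative paths, the mount point may differ):
 *
 *	echo 1 > /sys/kernel/tracing/events/bcachefs/trans_restart_too_many_iters/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */
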
859 DEFINE_EVENT(transaction_event, transaction_commit,
860         TP_PROTO(struct btree_trans *trans,
861                  unsigned long caller_ip),
862         TP_ARGS(trans, caller_ip)
863 );
864
865 DEFINE_EVENT(transaction_event, trans_restart_injected,
866         TP_PROTO(struct btree_trans *trans,
867                  unsigned long caller_ip),
868         TP_ARGS(trans, caller_ip)
869 );
870
871 TRACE_EVENT(trans_restart_split_race,
872         TP_PROTO(struct btree_trans *trans,
873                  unsigned long caller_ip,
874                  struct btree *b),
875         TP_ARGS(trans, caller_ip, b),
876
877         TP_STRUCT__entry(
878                 __array(char,                   trans_fn, 32    )
879                 __field(unsigned long,          caller_ip       )
880                 __field(u8,                     level           )
881                 __field(u16,                    written         )
882                 __field(u16,                    blocks          )
883                 __field(u16,                    u64s_remaining  )
884         ),
885
886         TP_fast_assign(
887                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
888                 __entry->caller_ip              = caller_ip;
889                 __entry->level          = b->c.level;
890                 __entry->written        = b->written;
891                 __entry->blocks         = btree_blocks(trans->c);
892                 __entry->u64s_remaining = bch_btree_keys_u64s_remaining(trans->c, b);
893         ),
894
895         TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
896                   __entry->trans_fn, (void *) __entry->caller_ip,
897                   __entry->level,
898                   __entry->written, __entry->blocks,
899                   __entry->u64s_remaining)
900 );
901
902 DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
903         TP_PROTO(struct btree_trans *trans,
904                  unsigned long caller_ip),
905         TP_ARGS(trans, caller_ip)
906 );
907
908 TRACE_EVENT(trans_restart_journal_preres_get,
909         TP_PROTO(struct btree_trans *trans,
910                  unsigned long caller_ip,
911                  unsigned flags),
912         TP_ARGS(trans, caller_ip, flags),
913
914         TP_STRUCT__entry(
915                 __array(char,                   trans_fn, 32    )
916                 __field(unsigned long,          caller_ip       )
917                 __field(unsigned,               flags           )
918         ),
919
920         TP_fast_assign(
921                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
922                 __entry->caller_ip              = caller_ip;
923                 __entry->flags                  = flags;
924         ),
925
926         TP_printk("%s %pS %x", __entry->trans_fn,
927                   (void *) __entry->caller_ip,
928                   __entry->flags)
929 );
930
931 DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
932         TP_PROTO(struct btree_trans *trans,
933                  unsigned long caller_ip),
934         TP_ARGS(trans, caller_ip)
935 );
936
937 DEFINE_EVENT(transaction_event, trans_traverse_all,
938         TP_PROTO(struct btree_trans *trans,
939                  unsigned long caller_ip),
940         TP_ARGS(trans, caller_ip)
941 );
942
943 DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
944         TP_PROTO(struct btree_trans *trans,
945                  unsigned long caller_ip),
946         TP_ARGS(trans, caller_ip)
947 );
948
949 DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
950         TP_PROTO(struct btree_trans *trans,
951                  unsigned long caller_ip),
952         TP_ARGS(trans, caller_ip)
953 );
954
955 DECLARE_EVENT_CLASS(transaction_restart_iter,
956         TP_PROTO(struct btree_trans *trans,
957                  unsigned long caller_ip,
958                  struct btree_path *path),
959         TP_ARGS(trans, caller_ip, path),
960
961         TP_STRUCT__entry(
962                 __array(char,                   trans_fn, 32    )
963                 __field(unsigned long,          caller_ip       )
964                 __field(u8,                     btree_id        )
965                 TRACE_BPOS_entries(pos)
966         ),
967
968         TP_fast_assign(
969                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
970                 __entry->caller_ip              = caller_ip;
971                 __entry->btree_id               = path->btree_id;
972                 TRACE_BPOS_assign(pos, path->pos)
973         ),
974
975         TP_printk("%s %pS btree %s pos %llu:%llu:%u",
976                   __entry->trans_fn,
977                   (void *) __entry->caller_ip,
978                   bch2_btree_ids[__entry->btree_id],
979                   __entry->pos_inode,
980                   __entry->pos_offset,
981                   __entry->pos_snapshot)
982 );
983
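/*
 * Events defined with this class record the btree id and position of the
 * btree_path involved in the restart, which makes it possible to attribute
 * transaction restarts to a specific btree and key range when tracing.
 */
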
984 DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_reused,
985         TP_PROTO(struct btree_trans *trans,
986                  unsigned long caller_ip,
987                  struct btree_path *path),
988         TP_ARGS(trans, caller_ip, path)
989 );
990
991 DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_split,
992         TP_PROTO(struct btree_trans *trans,
993                  unsigned long caller_ip,
994                  struct btree_path *path),
995         TP_ARGS(trans, caller_ip, path)
996 );
997
998 TRACE_EVENT(trans_restart_upgrade,
999         TP_PROTO(struct btree_trans *trans,
1000                  unsigned long caller_ip,
1001                  struct btree_path *path,
1002                  unsigned old_locks_want,
1003                  unsigned new_locks_want),
1004         TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
1005
1006         TP_STRUCT__entry(
1007                 __array(char,                   trans_fn, 32    )
1008                 __field(unsigned long,          caller_ip       )
1009                 __field(u8,                     btree_id        )
1010                 __field(u8,                     old_locks_want  )
1011                 __field(u8,                     new_locks_want  )
1012                 TRACE_BPOS_entries(pos)
1013         ),
1014
1015         TP_fast_assign(
1016                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1017                 __entry->caller_ip              = caller_ip;
1018                 __entry->btree_id               = path->btree_id;
1019                 __entry->old_locks_want         = old_locks_want;
1020                 __entry->new_locks_want         = new_locks_want;
1021                 TRACE_BPOS_assign(pos, path->pos)
1022         ),
1023
1024         TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
1025                   __entry->trans_fn,
1026                   (void *) __entry->caller_ip,
1027                   bch2_btree_ids[__entry->btree_id],
1028                   __entry->pos_inode,
1029                   __entry->pos_offset,
1030                   __entry->pos_snapshot,
1031                   __entry->old_locks_want,
1032                   __entry->new_locks_want)
1033 );
1034
1035 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock,
1036         TP_PROTO(struct btree_trans *trans,
1037                  unsigned long caller_ip,
1038                  struct btree_path *path),
1039         TP_ARGS(trans, caller_ip, path)
1040 );
1041
1042 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_next_node,
1043         TP_PROTO(struct btree_trans *trans,
1044                  unsigned long caller_ip,
1045                  struct btree_path *path),
1046         TP_ARGS(trans, caller_ip, path)
1047 );
1048
1049 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_parent_for_fill,
1050         TP_PROTO(struct btree_trans *trans,
1051                  unsigned long caller_ip,
1052                  struct btree_path *path),
1053         TP_ARGS(trans, caller_ip, path)
1054 );
1055
1056 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_after_fill,
1057         TP_PROTO(struct btree_trans *trans,
1058                  unsigned long caller_ip,
1059                  struct btree_path *path),
1060         TP_ARGS(trans, caller_ip, path)
1061 );
1062
1063 DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
1064         TP_PROTO(struct btree_trans *trans,
1065                  unsigned long caller_ip),
1066         TP_ARGS(trans, caller_ip)
1067 );
1068
1069 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_key_cache_fill,
1070         TP_PROTO(struct btree_trans *trans,
1071                  unsigned long caller_ip,
1072                  struct btree_path *path),
1073         TP_ARGS(trans, caller_ip, path)
1074 );
1075
1076 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path,
1077         TP_PROTO(struct btree_trans *trans,
1078                  unsigned long caller_ip,
1079                  struct btree_path *path),
1080         TP_ARGS(trans, caller_ip, path)
1081 );
1082
1083 DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path_intent,
1084         TP_PROTO(struct btree_trans *trans,
1085                  unsigned long caller_ip,
1086                  struct btree_path *path),
1087         TP_ARGS(trans, caller_ip, path)
1088 );
1089
1090 DEFINE_EVENT(transaction_restart_iter,  trans_restart_traverse,
1091         TP_PROTO(struct btree_trans *trans,
1092                  unsigned long caller_ip,
1093                  struct btree_path *path),
1094         TP_ARGS(trans, caller_ip, path)
1095 );
1096
1097 DEFINE_EVENT(transaction_restart_iter,  trans_restart_memory_allocation_failure,
1098         TP_PROTO(struct btree_trans *trans,
1099                  unsigned long caller_ip,
1100                  struct btree_path *path),
1101         TP_ARGS(trans, caller_ip, path)
1102 );
1103
1104 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
1105         TP_PROTO(struct btree_trans *trans,
1106                  unsigned long caller_ip),
1107         TP_ARGS(trans, caller_ip)
1108 );
1109
1110 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
1111         TP_PROTO(struct btree_trans *trans,
1112                  unsigned long caller_ip),
1113         TP_ARGS(trans, caller_ip)
1114 );
1115
1116 TRACE_EVENT(trans_restart_would_deadlock_write,
1117         TP_PROTO(struct btree_trans *trans),
1118         TP_ARGS(trans),
1119
1120         TP_STRUCT__entry(
1121                 __array(char,                   trans_fn, 32    )
1122         ),
1123
1124         TP_fast_assign(
1125                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1126         ),
1127
1128         TP_printk("%s", __entry->trans_fn)
1129 );
1130
1131 TRACE_EVENT(trans_restart_mem_realloced,
1132         TP_PROTO(struct btree_trans *trans,
1133                  unsigned long caller_ip,
1134                  unsigned long bytes),
1135         TP_ARGS(trans, caller_ip, bytes),
1136
1137         TP_STRUCT__entry(
1138                 __array(char,                   trans_fn, 32    )
1139                 __field(unsigned long,          caller_ip       )
1140                 __field(unsigned long,          bytes           )
1141         ),
1142
1143         TP_fast_assign(
1144                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1145                 __entry->caller_ip      = caller_ip;
1146                 __entry->bytes          = bytes;
1147         ),
1148
1149         TP_printk("%s %pS bytes %lu",
1150                   __entry->trans_fn,
1151                   (void *) __entry->caller_ip,
1152                   __entry->bytes)
1153 );
1154
1155 TRACE_EVENT(trans_restart_key_cache_key_realloced,
1156         TP_PROTO(struct btree_trans *trans,
1157                  unsigned long caller_ip,
1158                  struct btree_path *path,
1159                  unsigned old_u64s,
1160                  unsigned new_u64s),
1161         TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
1162
1163         TP_STRUCT__entry(
1164                 __array(char,                   trans_fn, 32    )
1165                 __field(unsigned long,          caller_ip       )
1166                 __field(enum btree_id,          btree_id        )
1167                 TRACE_BPOS_entries(pos)
1168                 __field(u32,                    old_u64s        )
1169                 __field(u32,                    new_u64s        )
1170         ),
1171
1172         TP_fast_assign(
1173                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1174                 __entry->caller_ip              = caller_ip;
1175
1176                 __entry->btree_id       = path->btree_id;
1177                 TRACE_BPOS_assign(pos, path->pos);
1178                 __entry->old_u64s       = old_u64s;
1179                 __entry->new_u64s       = new_u64s;
1180         ),
1181
1182         TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
1183                   __entry->trans_fn,
1184                   (void *) __entry->caller_ip,
1185                   bch2_btree_ids[__entry->btree_id],
1186                   __entry->pos_inode,
1187                   __entry->pos_offset,
1188                   __entry->pos_snapshot,
1189                   __entry->old_u64s,
1190                   __entry->new_u64s)
1191 );
1192
1193 DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
1194         TP_PROTO(struct btree_trans *trans,
1195                  unsigned long caller_ip),
1196         TP_ARGS(trans, caller_ip)
1197 );
1198
1199 TRACE_EVENT(write_buffer_flush,
1200         TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
1201         TP_ARGS(trans, nr, skipped, fast, size),
1202
1203         TP_STRUCT__entry(
1204                 __field(size_t,         nr              )
1205                 __field(size_t,         skipped         )
1206                 __field(size_t,         fast            )
1207                 __field(size_t,         size            )
1208         ),
1209
1210         TP_fast_assign(
1211                 __entry->nr     = nr;
1212                 __entry->skipped = skipped;
1213                 __entry->fast   = fast;
1214                 __entry->size   = size;
1215         ),
1216
1217         TP_printk("%zu/%zu skipped %zu fast %zu",
1218                   __entry->nr, __entry->size, __entry->skipped, __entry->fast)
1219 );
1220
1221 TRACE_EVENT(write_buffer_flush_slowpath,
1222         TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
1223         TP_ARGS(trans, nr, size),
1224
1225         TP_STRUCT__entry(
1226                 __field(size_t,         nr              )
1227                 __field(size_t,         size            )
1228         ),
1229
1230         TP_fast_assign(
1231                 __entry->nr     = nr;
1232                 __entry->size   = size;
1233         ),
1234
1235         TP_printk("%zu/%zu", __entry->nr, __entry->size)
1236 );
1237
1238 #endif /* _TRACE_BCACHEFS_H */
1239
1240 /* This part must be outside protection */
1241 #undef TRACE_INCLUDE_PATH
1242 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1243
1244 #undef TRACE_INCLUDE_FILE
1245 #define TRACE_INCLUDE_FILE trace
1246
1247 #include <trace/define_trace.h>
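
/*
 * Usage note (standard kernel tracepoint convention, not specific to this
 * file): exactly one compilation unit must instantiate these tracepoints by
 * defining CREATE_TRACE_POINTS before including this header, e.g.
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace.h"
 *
 * All other users include it normally and call the generated trace_*()
 * helpers; TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE above tell define_trace.h
 * where to find this header when it is re-included.
 */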