/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHEFS_H

#include <linux/tracepoint.h>

#define TRACE_BPOS_entries(name)                                \
        __field(u64,                    name##_inode    )       \
        __field(u64,                    name##_offset   )       \
        __field(u32,                    name##_snapshot )

#define TRACE_BPOS_assign(dst, src)                             \
        __entry->dst##_inode            = (src).inode;          \
        __entry->dst##_offset           = (src).offset;         \
        __entry->dst##_snapshot         = (src).snapshot
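
/*
 * Helper macros: TRACE_BPOS_entries() declares the three tracepoint fields
 * needed to record a struct bpos, and TRACE_BPOS_assign() fills them in from
 * TP_fast_assign().  As a rough sketch (not the literal preprocessor output),
 * TRACE_BPOS_entries(p) behaves like:
 *
 *      __field(u64, p_inode)
 *      __field(u64, p_offset)
 *      __field(u32, p_snapshot)
 *
 * and TRACE_BPOS_assign(p, pos) like:
 *
 *      __entry->p_inode    = pos.inode;
 *      __entry->p_offset   = pos.offset;
 *      __entry->p_snapshot = pos.snapshot;
 */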

DECLARE_EVENT_CLASS(bpos,
        TP_PROTO(const struct bpos *p),
        TP_ARGS(p),

        TP_STRUCT__entry(
                TRACE_BPOS_entries(p)
        ),

        TP_fast_assign(
                TRACE_BPOS_assign(p, *p);
        ),

        TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
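
/*
 * The event classes above and below are instantiated further down with
 * DEFINE_EVENT(); each definition generates a trace_<event_name>() helper
 * with the class's TP_PROTO signature.  For example, the bpos class backs
 * bkey_pack_pos_fail below, which (assuming the usual tracepoint
 * conventions) would be emitted from bset.c as:
 *
 *      trace_bkey_pack_pos_fail(&pos);
 */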

DECLARE_EVENT_CLASS(bkey,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k),

        TP_STRUCT__entry(
                __string(k,     k                               )
        ),

        TP_fast_assign(
                __assign_str(k, k);
        ),

        TP_printk("%s", __get_str(k))
);

DECLARE_EVENT_CLASS(btree_node,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u8,             level                   )
                __field(u8,             btree_id                )
                TRACE_BPOS_entries(pos)
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->level          = b->c.level;
                __entry->btree_id       = b->c.btree_id;
                TRACE_BPOS_assign(pos, b->key.k.p);
        ),

        TP_printk("%d,%d %u %s %llu:%llu:%u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->level,
                  bch2_btree_ids[__entry->btree_id],
                  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(bch_fs,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
        ),

        TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(bio,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
        ),

        TP_printk("%d,%d  %s %llu + %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector, __entry->nr_sector)
);
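
/*
 * Note: the bio class records dev as 0 when the bio has no block device
 * attached yet (bio->bi_bdev is NULL), and rwbs is the usual blktrace-style
 * op/flags string filled in by blk_fill_rwbs() (e.g. "WS" for a sync write;
 * that example is illustrative, not taken from this file).
 */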

/* super-io.c: */
TRACE_EVENT(write_super,
        TP_PROTO(struct bch_fs *c, unsigned long ip),
        TP_ARGS(c, ip),

        TP_STRUCT__entry(
                __field(dev_t,          dev     )
                __field(unsigned long,  ip      )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->ip             = ip;
        ),

        TP_printk("%d,%d for %pS",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (void *) __entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, read_promote,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_bounce,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, journal_entry_full,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bio, journal_write,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

TRACE_EVENT(journal_reclaim_start,
        TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
                 u64 min_nr, u64 min_key_cache,
                 u64 prereserved, u64 prereserved_total,
                 u64 btree_cache_dirty, u64 btree_cache_total,
                 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
        TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
                btree_cache_dirty, btree_cache_total,
                btree_key_cache_dirty, btree_key_cache_total),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(bool,           direct                  )
                __field(bool,           kicked                  )
                __field(u64,            min_nr                  )
                __field(u64,            min_key_cache           )
                __field(u64,            prereserved             )
                __field(u64,            prereserved_total       )
                __field(u64,            btree_cache_dirty       )
                __field(u64,            btree_cache_total       )
                __field(u64,            btree_key_cache_dirty   )
                __field(u64,            btree_key_cache_total   )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->direct                 = direct;
                __entry->kicked                 = kicked;
                __entry->min_nr                 = min_nr;
                __entry->min_key_cache          = min_key_cache;
                __entry->prereserved            = prereserved;
                __entry->prereserved_total      = prereserved_total;
                __entry->btree_cache_dirty      = btree_cache_dirty;
                __entry->btree_cache_total      = btree_cache_total;
                __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
                __entry->btree_key_cache_total  = btree_key_cache_total;
        ),

        TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->direct,
                  __entry->kicked,
                  __entry->min_nr,
                  __entry->min_key_cache,
                  __entry->prereserved,
                  __entry->prereserved_total,
                  __entry->btree_cache_dirty,
                  __entry->btree_cache_total,
                  __entry->btree_key_cache_dirty,
                  __entry->btree_key_cache_total)
);

TRACE_EVENT(journal_reclaim_finish,
        TP_PROTO(struct bch_fs *c, u64 nr_flushed),
        TP_ARGS(c, nr_flushed),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u64,            nr_flushed              )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->nr_flushed     = nr_flushed;
        ),

        TP_printk("%d,%d flushed %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->nr_flushed)
);

/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
        TP_PROTO(const struct bpos *p),
        TP_ARGS(p)
);

/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
        TP_PROTO(long nr_to_scan, long can_free, long ret),
        TP_ARGS(nr_to_scan, can_free, ret),

        TP_STRUCT__entry(
                __field(long,   nr_to_scan              )
                __field(long,   can_free                )
                __field(long,   ret                     )
        ),

        TP_fast_assign(
                __entry->nr_to_scan     = nr_to_scan;
                __entry->can_free       = can_free;
                __entry->ret            = ret;
        ),

        TP_printk("scanned for %li nodes, can free %li, ret %li",
                  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node, btree_cache_reap,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

TRACE_EVENT(btree_node_write,
        TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
        TP_ARGS(b, bytes, sectors),

        TP_STRUCT__entry(
                __field(enum btree_node_type,   type)
                __field(unsigned,       bytes                   )
                __field(unsigned,       sectors                 )
        ),

        TP_fast_assign(
                __entry->type   = btree_node_type(b);
                __entry->bytes  = bytes;
                __entry->sectors = sectors;
        ),

        TP_printk("bkey type %u bytes %u sectors %u",
                  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

TRACE_EVENT(btree_reserve_get_fail,
        TP_PROTO(const char *trans_fn,
                 unsigned long caller_ip,
                 size_t required,
                 int ret),
        TP_ARGS(trans_fn, caller_ip, required, ret),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(size_t,                 required        )
                __array(char,                   ret, 32         )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->required       = required;
                strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
        ),

        TP_printk("%s %pS required %zu ret %s",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  __entry->required,
                  __entry->ret)
);
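
/*
 * btree_reserve_get_fail stores the error as its name rather than the raw
 * integer: bch2_err_str(ret) is copied into a fixed 32-byte buffer with
 * strscpy(), so very long error names would be truncated (an observation
 * about the code above, not a change in behaviour).
 */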

DEFINE_EVENT(btree_node, btree_node_compact,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
);

TRACE_EVENT(btree_path_relock_fail,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned level),
        TP_ARGS(trans, caller_ip, path, level),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     btree_id        )
                __field(u8,                     level           )
                TRACE_BPOS_entries(pos)
                __array(char,                   node, 24        )
                __field(u8,                     self_read_count )
                __field(u8,                     self_intent_count)
                __field(u8,                     read_count      )
                __field(u8,                     intent_count    )
                __field(u32,                    iter_lock_seq   )
                __field(u32,                    node_lock_seq   )
        ),

        TP_fast_assign(
                struct btree *b = btree_path_node(path, level);
                struct six_lock_count c;

                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->level                  = path->level;
                TRACE_BPOS_assign(pos, path->pos);

                c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
                __entry->self_read_count        = c.n[SIX_LOCK_read];
                __entry->self_intent_count      = c.n[SIX_LOCK_intent];

                if (IS_ERR(b)) {
                        strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
                } else {
                        c = six_lock_counts(&path->l[level].b->c.lock);
                        __entry->read_count     = c.n[SIX_LOCK_read];
                        __entry->intent_count   = c.n[SIX_LOCK_intent];
                        scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
                }
                __entry->iter_lock_seq          = path->l[level].lock_seq;
                __entry->node_lock_seq          = is_btree_node(path, level)
                        ? six_lock_seq(&path->l[level].b->c.lock)
                        : 0;
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_ids[__entry->btree_id],
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->level,
                  __entry->node,
                  __entry->self_read_count,
                  __entry->self_intent_count,
                  __entry->read_count,
                  __entry->intent_count,
                  __entry->iter_lock_seq,
                  __entry->node_lock_seq)
);
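
/*
 * In btree_path_relock_fail above (and btree_path_upgrade_fail below),
 * "held r:i" is this transaction's own read:intent hold counts on the node
 * (from bch2_btree_node_lock_counts()) and "lock count r:i" is the total
 * read:intent count on the node's six lock (from six_lock_counts()),
 * presumably so a trace reader can tell whether the failure came from this
 * transaction's own locks or from another holder.
 */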

TRACE_EVENT(btree_path_upgrade_fail,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned level),
        TP_ARGS(trans, caller_ip, path, level),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     btree_id        )
                __field(u8,                     level           )
                TRACE_BPOS_entries(pos)
                __field(u8,                     locked          )
                __field(u8,                     self_read_count )
                __field(u8,                     self_intent_count)
                __field(u8,                     read_count      )
                __field(u8,                     intent_count    )
                __field(u32,                    iter_lock_seq   )
                __field(u32,                    node_lock_seq   )
        ),

        TP_fast_assign(
                struct six_lock_count c;

                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->level                  = level;
                TRACE_BPOS_assign(pos, path->pos);
                __entry->locked                 = btree_node_locked(path, level);

                c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
                __entry->self_read_count        = c.n[SIX_LOCK_read];
                __entry->self_intent_count      = c.n[SIX_LOCK_intent];
                c = six_lock_counts(&path->l[level].b->c.lock);
                __entry->read_count             = c.n[SIX_LOCK_read];
                __entry->intent_count           = c.n[SIX_LOCK_intent];
                __entry->iter_lock_seq          = path->l[level].lock_seq;
                __entry->node_lock_seq          = is_btree_node(path, level)
                        ? six_lock_seq(&path->l[level].b->c.lock)
                        : 0;
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_ids[__entry->btree_id],
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->level,
                  __entry->locked,
                  __entry->self_read_count,
                  __entry->self_intent_count,
                  __entry->read_count,
                  __entry->intent_count,
                  __entry->iter_lock_seq,
                  __entry->node_lock_seq)
);

/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
);

/* Allocator */

DECLARE_EVENT_CLASS(bucket_alloc,
        TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
                 u64 bucket,
                 u64 free,
                 u64 avail,
                 u64 copygc_wait_amount,
                 s64 copygc_waiting_for,
                 struct bucket_alloc_state *s,
                 bool nonblocking,
                 const char *err),
        TP_ARGS(ca, alloc_reserve, bucket, free, avail,
                copygc_wait_amount, copygc_waiting_for,
                s, nonblocking, err),

        TP_STRUCT__entry(
                __field(u8,                     dev                     )
                __array(char,   reserve,        16                      )
                __field(u64,                    bucket  )
                __field(u64,                    free                    )
                __field(u64,                    avail                   )
                __field(u64,                    copygc_wait_amount      )
                __field(s64,                    copygc_waiting_for      )
                __field(u64,                    seen                    )
                __field(u64,                    open                    )
                __field(u64,                    need_journal_commit     )
                __field(u64,                    nouse                   )
                __field(bool,                   nonblocking             )
                __field(u64,                    nocow                   )
                __array(char,                   err,    32              )
        ),

        TP_fast_assign(
                __entry->dev            = ca->dev_idx;
                strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
                __entry->bucket         = bucket;
                __entry->free           = free;
                __entry->avail          = avail;
                __entry->copygc_wait_amount     = copygc_wait_amount;
                __entry->copygc_waiting_for     = copygc_waiting_for;
                __entry->seen           = s->buckets_seen;
                __entry->open           = s->skipped_open;
                __entry->need_journal_commit = s->skipped_need_journal_commit;
                __entry->nouse          = s->skipped_nouse;
                __entry->nonblocking    = nonblocking;
                __entry->nocow          = s->skipped_nocow;
                strscpy(__entry->err, err, sizeof(__entry->err));
        ),

        TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
                  __entry->reserve,
                  __entry->dev,
                  __entry->bucket,
                  __entry->free,
                  __entry->avail,
                  __entry->copygc_wait_amount,
                  __entry->copygc_waiting_for,
                  __entry->seen,
                  __entry->open,
                  __entry->need_journal_commit,
                  __entry->nouse,
                  __entry->nocow,
                  __entry->nonblocking,
                  __entry->err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc,
        TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
                 u64 bucket,
                 u64 free,
                 u64 avail,
                 u64 copygc_wait_amount,
                 s64 copygc_waiting_for,
                 struct bucket_alloc_state *s,
                 bool nonblocking,
                 const char *err),
        TP_ARGS(ca, alloc_reserve, bucket, free, avail,
                copygc_wait_amount, copygc_waiting_for,
                s, nonblocking, err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
        TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
                 u64 bucket,
                 u64 free,
                 u64 avail,
                 u64 copygc_wait_amount,
                 s64 copygc_waiting_for,
                 struct bucket_alloc_state *s,
                 bool nonblocking,
                 const char *err),
        TP_ARGS(ca, alloc_reserve, bucket, free, avail,
                copygc_wait_amount, copygc_waiting_for,
                s, nonblocking, err)
);
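
/*
 * bucket_alloc and bucket_alloc_fail share the bucket_alloc class, so they
 * log identical fields and differ only in when they fire.  Since TRACE_SYSTEM
 * is "bcachefs", they can be enabled at runtime through tracefs in the usual
 * way, e.g. (exact mount point may vary by distro/kernel config):
 *
 *      echo 1 > /sys/kernel/tracing/events/bcachefs/bucket_alloc_fail/enable
 *      cat /sys/kernel/tracing/trace_pipe
 */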

TRACE_EVENT(discard_buckets,
        TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
                 u64 need_journal_commit, u64 discarded, const char *err),
        TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u64,            seen                    )
                __field(u64,            open                    )
                __field(u64,            need_journal_commit     )
                __field(u64,            discarded               )
                __array(char,           err,    16              )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->seen                   = seen;
                __entry->open                   = open;
                __entry->need_journal_commit    = need_journal_commit;
                __entry->discarded              = discarded;
                strscpy(__entry->err, err, sizeof(__entry->err));
        ),

        TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->seen,
                  __entry->open,
                  __entry->need_journal_commit,
                  __entry->discarded,
                  __entry->err)
);

TRACE_EVENT(bucket_invalidate,
        TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
        TP_ARGS(c, dev, bucket, sectors),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u32,            dev_idx                 )
                __field(u32,            sectors                 )
                __field(u64,            bucket                  )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->dev_idx        = dev;
                __entry->sectors        = sectors;
                __entry->bucket         = bucket;
        ),

        TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->dev_idx, __entry->bucket,
                  __entry->sectors)
);

/* Moving IO */

TRACE_EVENT(bucket_evacuate,
        TP_PROTO(struct bch_fs *c, struct bpos *bucket),
        TP_ARGS(c, bucket),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u32,            dev_idx                 )
                __field(u64,            bucket                  )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->dev_idx        = bucket->inode;
                __entry->bucket         = bucket->offset;
        ),

        TP_printk("%d:%d %u:%llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->dev_idx, __entry->bucket)
);

DEFINE_EVENT(bkey, move_extent,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_read,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_write,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_finish,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

TRACE_EVENT(move_extent_fail,
        TP_PROTO(struct bch_fs *c, const char *msg),
        TP_ARGS(c, msg),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __string(msg,           msg                     )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __assign_str(msg, msg);
        ),

        TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
);

DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
        TP_PROTO(struct bch_fs *c, const char *k),
        TP_ARGS(c, k)
);

TRACE_EVENT(move_data,
        TP_PROTO(struct bch_fs *c, u64 sectors_moved,
                 u64 keys_moved),
        TP_ARGS(c, sectors_moved, keys_moved),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u64,            sectors_moved   )
                __field(u64,            keys_moved      )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->sectors_moved          = sectors_moved;
                __entry->keys_moved             = keys_moved;
        ),

        TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->sectors_moved, __entry->keys_moved)
);

TRACE_EVENT(evacuate_bucket,
        TP_PROTO(struct bch_fs *c, struct bpos *bucket,
                 unsigned sectors, unsigned bucket_size,
                 u64 fragmentation, int ret),
        TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

        TP_STRUCT__entry(
                __field(dev_t,          dev             )
                __field(u64,            member          )
                __field(u64,            bucket          )
                __field(u32,            sectors         )
                __field(u32,            bucket_size     )
                __field(u64,            fragmentation   )
                __field(int,            ret             )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->member                 = bucket->inode;
                __entry->bucket                 = bucket->offset;
                __entry->sectors                = sectors;
                __entry->bucket_size            = bucket_size;
                __entry->fragmentation          = fragmentation;
                __entry->ret                    = ret;
        ),

        TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->member, __entry->bucket,
                  __entry->sectors, __entry->bucket_size,
                  __entry->fragmentation, __entry->ret)
);

TRACE_EVENT(copygc,
        TP_PROTO(struct bch_fs *c,
                 u64 sectors_moved, u64 sectors_not_moved,
                 u64 buckets_moved, u64 buckets_not_moved),
        TP_ARGS(c,
                sectors_moved, sectors_not_moved,
                buckets_moved, buckets_not_moved),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u64,            sectors_moved           )
                __field(u64,            sectors_not_moved       )
                __field(u64,            buckets_moved           )
                __field(u64,            buckets_not_moved       )
        ),

        TP_fast_assign(
                __entry->dev                    = c->dev;
                __entry->sectors_moved          = sectors_moved;
                __entry->sectors_not_moved      = sectors_not_moved;
                __entry->buckets_moved          = buckets_moved;
                __entry->buckets_not_moved      = buckets_not_moved;
        ),

        TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->sectors_moved, __entry->sectors_not_moved,
                  __entry->buckets_moved, __entry->buckets_not_moved)
);

TRACE_EVENT(copygc_wait,
        TP_PROTO(struct bch_fs *c,
                 u64 wait_amount, u64 until),
        TP_ARGS(c, wait_amount, until),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(u64,            wait_amount             )
                __field(u64,            until                   )
        ),

        TP_fast_assign(
                __entry->dev            = c->dev;
                __entry->wait_amount    = wait_amount;
                __entry->until          = until;
        ),

        TP_printk("%d,%d waiting for %llu sectors until %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->wait_amount, __entry->until)
);

/* btree transactions: */

DECLARE_EVENT_CLASS(transaction_event,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
        ),

        TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
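
/*
 * Most of the trans_restart_* events below reuse the transaction_event class
 * above or the transaction_restart_iter class further down, so they record
 * only the transaction's fn name (truncated to 32 bytes by strscpy()), the
 * caller IP and, for the iter variant, the btree id and iterator position;
 * the restart reason is carried by the event name itself.
 */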

DEFINE_EVENT(transaction_event, transaction_commit,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_split_race,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree *b),
        TP_ARGS(trans, caller_ip, b),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     level           )
                __field(u16,                    written         )
                __field(u16,                    blocks          )
                __field(u16,                    u64s_remaining  )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->level          = b->c.level;
                __entry->written        = b->written;
                __entry->blocks         = btree_blocks(trans->c);
                __entry->u64s_remaining = bch_btree_keys_u64s_remaining(trans->c, b);
        ),

        TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
                  __entry->trans_fn, (void *) __entry->caller_ip,
                  __entry->level,
                  __entry->written, __entry->blocks,
                  __entry->u64s_remaining)
);

DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_journal_preres_get,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 unsigned flags),
        TP_ARGS(trans, caller_ip, flags),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(unsigned,               flags           )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->flags                  = flags;
        ),

        TP_printk("%s %pS %x", __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  __entry->flags)
);

DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

DECLARE_EVENT_CLASS(transaction_restart_iter,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     btree_id        )
                TRACE_BPOS_entries(pos)
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                TRACE_BPOS_assign(pos, path->pos)
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_ids[__entry->btree_id],
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_reused,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_split,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

TRACE_EVENT(trans_restart_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned old_locks_want,
                 unsigned new_locks_want),
        TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(u8,                     btree_id        )
                __field(u8,                     old_locks_want  )
                __field(u8,                     new_locks_want  )
                TRACE_BPOS_entries(pos)
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->old_locks_want         = old_locks_want;
                __entry->new_locks_want         = new_locks_want;
                TRACE_BPOS_assign(pos, path->pos)
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_ids[__entry->btree_id],
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->old_locks_want,
                  __entry->new_locks_want)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_next_node,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_parent_for_fill,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_after_fill,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_key_cache_fill,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_relock_path_intent,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_traverse,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,  trans_restart_memory_allocation_failure,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path),
        TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
        TP_PROTO(struct btree_trans *trans),
        TP_ARGS(trans),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
        ),

        TP_printk("%s", __entry->trans_fn)
);

TRACE_EVENT(trans_restart_mem_realloced,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 unsigned long bytes),
        TP_ARGS(trans, caller_ip, bytes),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(unsigned long,          bytes           )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->bytes          = bytes;
        ),

        TP_printk("%s %pS bytes %lu",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  __entry->bytes)
);

TRACE_EVENT(trans_restart_key_cache_key_realloced,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned old_u64s,
                 unsigned new_u64s),
        TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(enum btree_id,          btree_id        )
                TRACE_BPOS_entries(pos)
                __field(u32,                    old_u64s        )
                __field(u32,                    new_u64s        )
        ),

        TP_fast_assign(
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;

                __entry->btree_id       = path->btree_id;
                TRACE_BPOS_assign(pos, path->pos);
                __entry->old_u64s       = old_u64s;
                __entry->new_u64s       = new_u64s;
        ),

        TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
                  bch2_btree_ids[__entry->btree_id],
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->old_u64s,
                  __entry->new_u64s)
);

DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
        TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(write_buffer_flush,
        TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
        TP_ARGS(trans, nr, skipped, fast, size),

        TP_STRUCT__entry(
                __field(size_t,         nr              )
                __field(size_t,         skipped         )
                __field(size_t,         fast            )
                __field(size_t,         size            )
        ),

        TP_fast_assign(
                __entry->nr     = nr;
                __entry->skipped = skipped;
                __entry->fast   = fast;
                __entry->size   = size;
        ),

        TP_printk("%zu/%zu skipped %zu fast %zu",
                  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);

TRACE_EVENT(write_buffer_flush_slowpath,
        TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
        TP_ARGS(trans, nr, size),

        TP_STRUCT__entry(
                __field(size_t,         nr              )
                __field(size_t,         size            )
        ),

        TP_fast_assign(
                __entry->nr     = nr;
                __entry->size   = size;
        ),

        TP_printk("%zu/%zu", __entry->nr, __entry->size)
);

#endif /* _TRACE_BCACHEFS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>
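
/*
 * How this header is consumed (standard kernel tracepoint boilerplate, noted
 * here for readers of the standalone tools tree): exactly one .c file in the
 * bcachefs sources is expected to define CREATE_TRACE_POINTS before including
 * this header, which makes <trace/define_trace.h> emit the tracepoint
 * definitions; the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE settings above tell
 * it where to find this file.  Which .c file does so is not shown here.
 */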