/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

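/*
 * Note: like any kernel tracepoint header, each TRACE_EVENT()/DEFINE_EVENT()
 * below generates a trace_<name>() helper that the filesystem code calls, and
 * the events are exposed via tracefs under events/bcachefs/ (e.g.
 * /sys/kernel/tracing/events/bcachefs/bucket_alloc_fail/enable).
 */
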
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot

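/*
 * The TRACE_BPOS_*() helpers above are for events that record a struct bpos:
 * TRACE_BPOS_entries() declares the inode/offset/snapshot fields and
 * TRACE_BPOS_assign() fills them in, so every event prints positions in the
 * same %llu:%llu:%u form.
 */
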
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u64,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
	),

	TP_fast_assign(
		__entry->inode	= k->p.inode;
		__entry->offset	= k->p.offset;
		__entry->size	= k->size;
	),

	TP_printk("%llu:%llu len %u", __entry->inode,
		  __entry->offset, __entry->size)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

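/*
 * The event classes above factor out the argument shapes shared by many
 * events (a bare bpos, a bkey, a btree node, the filesystem, a bio); the
 * sections below, grouped by the source file that emits them, instantiate
 * them with DEFINE_EVENT() or define one-off TRACE_EVENT()s.
 */
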
/* super-io.c: */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, journal_entry_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 prereserved, u64 prereserved_total,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		prereserved		)
		__field(u64,		prereserved_total	)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->prereserved		= prereserved;
		__entry->prereserved_total	= prereserved_total;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->prereserved,
		  __entry->prereserved_total,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);

TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);

/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);

DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		TRACE_BPOS_entries(pos)
		__array(char,			node, 24	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= path->level;
		TRACE_BPOS_assign(pos, path->pos);
		if (IS_ERR(b))
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		else
			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		TRACE_BPOS_entries(pos)
		__field(u8,			locked		)
		__field(u8,			self_read_count	)
		__field(u8,			self_intent_count)
		__field(u8,			read_count	)
		__field(u8,			intent_count	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked			= btree_node_locked(path, level);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count		= c.n[SIX_LOCK_read];
		__entry->intent_count		= c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 bool user,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err),

	TP_STRUCT__entry(
		__field(dev_t,			dev			)
		__array(char,	reserve,	16			)
		__field(bool,			user			)
		__field(u64,			bucket			)
		__field(u64,			free			)
		__field(u64,			avail			)
		__field(u64,			copygc_wait_amount	)
		__field(s64,			copygc_waiting_for	)
		__field(u64,			seen			)
		__field(u64,			open			)
		__field(u64,			need_journal_commit	)
		__field(u64,			nouse			)
		__field(bool,			nonblocking		)
		__field(u64,			nocow			)
		__array(char,			err,	32		)
	),

	TP_fast_assign(
		__entry->dev		= ca->dev;
		strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
		__entry->user		= user;
		__entry->bucket		= bucket;
		__entry->free		= free;
		__entry->avail		= avail;
		__entry->copygc_wait_amount	= copygc_wait_amount;
		__entry->copygc_waiting_for	= copygc_waiting_for;
		__entry->seen		= s->buckets_seen;
		__entry->open		= s->skipped_open;
		__entry->need_journal_commit = s->skipped_need_journal_commit;
		__entry->nouse		= s->skipped_nouse;
		__entry->nonblocking	= nonblocking;
		__entry->nocow		= s->skipped_nocow;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d reserve %s user %u bucket %llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->reserve,
		  __entry->user,
		  __entry->bucket,
		  __entry->free,
		  __entry->avail,
		  __entry->copygc_wait_amount,
		  __entry->copygc_waiting_for,
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->nouse,
		  __entry->nocow,
		  __entry->nonblocking,
		  __entry->err)
);

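/*
 * bucket_alloc and bucket_alloc_fail below share the class above: both record
 * the reserve, the allocator counters from struct bucket_alloc_state (buckets
 * seen, skipped because open / awaiting journal commit / nouse / nocow) and a
 * human-readable error string; presumably the _fail variant fires when no
 * bucket could be allocated.
 */
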
DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 bool user,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 bool user,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

TRACE_EVENT(discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		seen			)
		__field(u64,		open			)
		__field(u64,		need_journal_commit	)
		__field(u64,		discarded		)
		__array(char,		err,	16		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->seen			= seen;
		__entry->open			= open;
		__entry->need_journal_commit	= need_journal_commit;
		__entry->discarded		= discarded;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->discarded,
		  __entry->err)
);

TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);

/* Moving IO */

DEFINE_EVENT(bkey, move_extent_read,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_write,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_finish,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_fail,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
	TP_PROTO(const struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(move_data,
	TP_PROTO(struct bch_fs *c, u64 sectors_moved,
		 u64 keys_moved),
	TP_ARGS(c, sectors_moved, keys_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		sectors_moved		)
		__field(u64,		keys_moved		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->sectors_moved	= sectors_moved;
		__entry->keys_moved	= keys_moved;
	),

	TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->keys_moved)
);

TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 u64 fragmentation, int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		member		)
		__field(u64,		bucket		)
		__field(u32,		sectors		)
		__field(u32,		bucket_size	)
		__field(u64,		fragmentation	)
		__field(int,		ret		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->member			= bucket->inode;
		__entry->bucket			= bucket->offset;
		__entry->sectors		= sectors;
		__entry->bucket_size		= bucket_size;
		__entry->fragmentation		= fragmentation;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->fragmentation, __entry->ret)
);

TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 sectors_moved, u64 sectors_not_moved,
		 u64 buckets_moved, u64 buckets_not_moved),
	TP_ARGS(c,
		sectors_moved, sectors_not_moved,
		buckets_moved, buckets_not_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		sectors_moved		)
		__field(u64,		sectors_not_moved	)
		__field(u64,		buckets_moved		)
		__field(u64,		buckets_not_moved	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->sectors_moved		= sectors_moved;
		__entry->sectors_not_moved	= sectors_not_moved;
		__entry->buckets_moved		= buckets_moved;
		__entry->buckets_not_moved	= buckets_not_moved;
	),

	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->sectors_not_moved,
		  __entry->buckets_moved, __entry->buckets_not_moved)
);

TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		wait_amount		)
		__field(u64,		until			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	TP_printk("%d,%d waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);

/* btree transactions: */

DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

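/*
 * Base class for the transaction commit/restart events below that only need
 * the transaction's fn name and the caller's instruction pointer; restarts
 * tied to a specific btree_path use transaction_restart_iter or a dedicated
 * TRACE_EVENT() instead.
 */
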
DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_journal_res_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);

DEFINE_EVENT(transaction_event, trans_restart_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_mark_replicas,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

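/*
 * Restart events instantiated from the class above also record which btree
 * and position the path was at when the restart was triggered.
 */
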
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);

TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);

TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);

DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);

TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
	TP_ARGS(trans, nr, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu", __entry->nr, __entry->size)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>