/*
 * libbcachefs/trace.h - bcachefs tracepoint definitions
 */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7
8 #include <linux/tracepoint.h>
9
10 #define TRACE_BPOS_entries(name)                                \
11         __field(u64,                    name##_inode    )       \
12         __field(u64,                    name##_offset   )       \
13         __field(u32,                    name##_snapshot )
14
15 #define TRACE_BPOS_assign(dst, src)                             \
16         __entry->dst##_inode            = (src).inode;          \
17         __entry->dst##_offset           = (src).offset;         \
18         __entry->dst##_snapshot         = (src).snapshot
19
20 DECLARE_EVENT_CLASS(bpos,
21         TP_PROTO(const struct bpos *p),
22         TP_ARGS(p),
23
24         TP_STRUCT__entry(
25                 TRACE_BPOS_entries(p)
26         ),
27
28         TP_fast_assign(
29                 TRACE_BPOS_assign(p, *p);
30         ),
31
32         TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
33 );
34
35 DECLARE_EVENT_CLASS(str,
36         TP_PROTO(struct bch_fs *c, const char *str),
37         TP_ARGS(c, str),
38
39         TP_STRUCT__entry(
40                 __field(dev_t,          dev                     )
41                 __string(str,           str                     )
42         ),
43
44         TP_fast_assign(
45                 __entry->dev            = c->dev;
46                 __assign_str(str, str);
47         ),
48
49         TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
50 );
51
52 DECLARE_EVENT_CLASS(btree_node,
53         TP_PROTO(struct bch_fs *c, struct btree *b),
54         TP_ARGS(c, b),
55
56         TP_STRUCT__entry(
57                 __field(dev_t,          dev                     )
58                 __field(u8,             level                   )
59                 __field(u8,             btree_id                )
60                 TRACE_BPOS_entries(pos)
61         ),
62
63         TP_fast_assign(
64                 __entry->dev            = c->dev;
65                 __entry->level          = b->c.level;
66                 __entry->btree_id       = b->c.btree_id;
67                 TRACE_BPOS_assign(pos, b->key.k.p);
68         ),
69
70         TP_printk("%d,%d %u %s %llu:%llu:%u",
71                   MAJOR(__entry->dev), MINOR(__entry->dev),
72                   __entry->level,
73                   bch2_btree_id_str(__entry->btree_id),
74                   __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
75 );
76
77 DECLARE_EVENT_CLASS(bch_fs,
78         TP_PROTO(struct bch_fs *c),
79         TP_ARGS(c),
80
81         TP_STRUCT__entry(
82                 __field(dev_t,          dev                     )
83         ),
84
85         TP_fast_assign(
86                 __entry->dev            = c->dev;
87         ),
88
89         TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
90 );
91
92 DECLARE_EVENT_CLASS(bio,
93         TP_PROTO(struct bio *bio),
94         TP_ARGS(bio),
95
96         TP_STRUCT__entry(
97                 __field(dev_t,          dev                     )
98                 __field(sector_t,       sector                  )
99                 __field(unsigned int,   nr_sector               )
100                 __array(char,           rwbs,   6               )
101         ),
102
103         TP_fast_assign(
104                 __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
105                 __entry->sector         = bio->bi_iter.bi_sector;
106                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
107                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
108         ),
109
110         TP_printk("%d,%d  %s %llu + %u",
111                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
112                   (unsigned long long)__entry->sector, __entry->nr_sector)
113 );
114
115 /* super-io.c: */
116 TRACE_EVENT(write_super,
117         TP_PROTO(struct bch_fs *c, unsigned long ip),
118         TP_ARGS(c, ip),
119
120         TP_STRUCT__entry(
121                 __field(dev_t,          dev     )
122                 __field(unsigned long,  ip      )
123         ),
124
125         TP_fast_assign(
126                 __entry->dev            = c->dev;
127                 __entry->ip             = ip;
128         ),
129
130         TP_printk("%d,%d for %pS",
131                   MAJOR(__entry->dev), MINOR(__entry->dev),
132                   (void *) __entry->ip)
133 );
134
135 /* io.c: */
136
137 DEFINE_EVENT(bio, read_promote,
138         TP_PROTO(struct bio *bio),
139         TP_ARGS(bio)
140 );
141
142 TRACE_EVENT(read_nopromote,
143         TP_PROTO(struct bch_fs *c, int ret),
144         TP_ARGS(c, ret),
145
146         TP_STRUCT__entry(
147                 __field(dev_t,          dev             )
148                 __array(char,           ret, 32         )
149         ),
150
151         TP_fast_assign(
152                 __entry->dev            = c->dev;
153                 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
154         ),
155
156         TP_printk("%d,%d ret %s",
157                   MAJOR(__entry->dev), MINOR(__entry->dev),
158                   __entry->ret)
159 );
160
161 DEFINE_EVENT(bio, read_bounce,
162         TP_PROTO(struct bio *bio),
163         TP_ARGS(bio)
164 );
165
166 DEFINE_EVENT(bio, read_split,
167         TP_PROTO(struct bio *bio),
168         TP_ARGS(bio)
169 );
170
171 DEFINE_EVENT(bio, read_retry,
172         TP_PROTO(struct bio *bio),
173         TP_ARGS(bio)
174 );
175
176 DEFINE_EVENT(bio, read_reuse_race,
177         TP_PROTO(struct bio *bio),
178         TP_ARGS(bio)
179 );
180
181 /* Journal */
182
183 DEFINE_EVENT(bch_fs, journal_full,
184         TP_PROTO(struct bch_fs *c),
185         TP_ARGS(c)
186 );
187
188 DEFINE_EVENT(bch_fs, journal_entry_full,
189         TP_PROTO(struct bch_fs *c),
190         TP_ARGS(c)
191 );
192
193 TRACE_EVENT(journal_entry_close,
194         TP_PROTO(struct bch_fs *c, unsigned bytes),
195         TP_ARGS(c, bytes),
196
197         TP_STRUCT__entry(
198                 __field(dev_t,          dev                     )
199                 __field(u32,            bytes                   )
200         ),
201
202         TP_fast_assign(
203                 __entry->dev                    = c->dev;
204                 __entry->bytes                  = bytes;
205         ),
206
207         TP_printk("%d,%d entry bytes %u",
208                   MAJOR(__entry->dev), MINOR(__entry->dev),
209                   __entry->bytes)
210 );
211
212 DEFINE_EVENT(bio, journal_write,
213         TP_PROTO(struct bio *bio),
214         TP_ARGS(bio)
215 );
216
217 TRACE_EVENT(journal_reclaim_start,
218         TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
219                  u64 min_nr, u64 min_key_cache,
220                  u64 btree_cache_dirty, u64 btree_cache_total,
221                  u64 btree_key_cache_dirty, u64 btree_key_cache_total),
222         TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
223                 btree_cache_dirty, btree_cache_total,
224                 btree_key_cache_dirty, btree_key_cache_total),
225
226         TP_STRUCT__entry(
227                 __field(dev_t,          dev                     )
228                 __field(bool,           direct                  )
229                 __field(bool,           kicked                  )
230                 __field(u64,            min_nr                  )
231                 __field(u64,            min_key_cache           )
232                 __field(u64,            btree_cache_dirty       )
233                 __field(u64,            btree_cache_total       )
234                 __field(u64,            btree_key_cache_dirty   )
235                 __field(u64,            btree_key_cache_total   )
236         ),
237
238         TP_fast_assign(
239                 __entry->dev                    = c->dev;
240                 __entry->direct                 = direct;
241                 __entry->kicked                 = kicked;
242                 __entry->min_nr                 = min_nr;
243                 __entry->min_key_cache          = min_key_cache;
244                 __entry->btree_cache_dirty      = btree_cache_dirty;
245                 __entry->btree_cache_total      = btree_cache_total;
246                 __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
247                 __entry->btree_key_cache_total  = btree_key_cache_total;
248         ),
249
250         TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
251                   MAJOR(__entry->dev), MINOR(__entry->dev),
252                   __entry->direct,
253                   __entry->kicked,
254                   __entry->min_nr,
255                   __entry->min_key_cache,
256                   __entry->btree_cache_dirty,
257                   __entry->btree_cache_total,
258                   __entry->btree_key_cache_dirty,
259                   __entry->btree_key_cache_total)
260 );
261
262 TRACE_EVENT(journal_reclaim_finish,
263         TP_PROTO(struct bch_fs *c, u64 nr_flushed),
264         TP_ARGS(c, nr_flushed),
265
266         TP_STRUCT__entry(
267                 __field(dev_t,          dev                     )
268                 __field(u64,            nr_flushed              )
269         ),
270
271         TP_fast_assign(
272                 __entry->dev            = c->dev;
273                 __entry->nr_flushed     = nr_flushed;
274         ),
275
276         TP_printk("%d,%d flushed %llu",
277                   MAJOR(__entry->dev), MINOR(__entry->dev),
278                   __entry->nr_flushed)
279 );
280
281 /* bset.c: */
282
283 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
284         TP_PROTO(const struct bpos *p),
285         TP_ARGS(p)
286 );
287
288 /* Btree cache: */
289
290 TRACE_EVENT(btree_cache_scan,
291         TP_PROTO(long nr_to_scan, long can_free, long ret),
292         TP_ARGS(nr_to_scan, can_free, ret),
293
294         TP_STRUCT__entry(
295                 __field(long,   nr_to_scan              )
296                 __field(long,   can_free                )
297                 __field(long,   ret                     )
298         ),
299
300         TP_fast_assign(
301                 __entry->nr_to_scan     = nr_to_scan;
302                 __entry->can_free       = can_free;
303                 __entry->ret            = ret;
304         ),
305
306         TP_printk("scanned for %li nodes, can free %li, ret %li",
307                   __entry->nr_to_scan, __entry->can_free, __entry->ret)
308 );
309
310 DEFINE_EVENT(btree_node, btree_cache_reap,
311         TP_PROTO(struct bch_fs *c, struct btree *b),
312         TP_ARGS(c, b)
313 );
314
315 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
316         TP_PROTO(struct bch_fs *c),
317         TP_ARGS(c)
318 );
319
320 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
321         TP_PROTO(struct bch_fs *c),
322         TP_ARGS(c)
323 );
324
325 DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
326         TP_PROTO(struct bch_fs *c),
327         TP_ARGS(c)
328 );
329
330 DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
331         TP_PROTO(struct bch_fs *c),
332         TP_ARGS(c)
333 );
334
335 /* Btree */
336
337 DEFINE_EVENT(btree_node, btree_node_read,
338         TP_PROTO(struct bch_fs *c, struct btree *b),
339         TP_ARGS(c, b)
340 );
341
342 TRACE_EVENT(btree_node_write,
343         TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
344         TP_ARGS(b, bytes, sectors),
345
346         TP_STRUCT__entry(
347                 __field(enum btree_node_type,   type)
348                 __field(unsigned,       bytes                   )
349                 __field(unsigned,       sectors                 )
350         ),
351
352         TP_fast_assign(
353                 __entry->type   = btree_node_type(b);
354                 __entry->bytes  = bytes;
355                 __entry->sectors = sectors;
356         ),
357
358         TP_printk("bkey type %u bytes %u sectors %u",
359                   __entry->type , __entry->bytes, __entry->sectors)
360 );
361
362 DEFINE_EVENT(btree_node, btree_node_alloc,
363         TP_PROTO(struct bch_fs *c, struct btree *b),
364         TP_ARGS(c, b)
365 );
366
367 DEFINE_EVENT(btree_node, btree_node_free,
368         TP_PROTO(struct bch_fs *c, struct btree *b),
369         TP_ARGS(c, b)
370 );
371
372 TRACE_EVENT(btree_reserve_get_fail,
373         TP_PROTO(const char *trans_fn,
374                  unsigned long caller_ip,
375                  size_t required,
376                  int ret),
377         TP_ARGS(trans_fn, caller_ip, required, ret),
378
379         TP_STRUCT__entry(
380                 __array(char,                   trans_fn, 32    )
381                 __field(unsigned long,          caller_ip       )
382                 __field(size_t,                 required        )
383                 __array(char,                   ret, 32         )
384         ),
385
386         TP_fast_assign(
387                 strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
388                 __entry->caller_ip      = caller_ip;
389                 __entry->required       = required;
390                 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
391         ),
392
393         TP_printk("%s %pS required %zu ret %s",
394                   __entry->trans_fn,
395                   (void *) __entry->caller_ip,
396                   __entry->required,
397                   __entry->ret)
398 );
399
400 DEFINE_EVENT(btree_node, btree_node_compact,
401         TP_PROTO(struct bch_fs *c, struct btree *b),
402         TP_ARGS(c, b)
403 );
404
405 DEFINE_EVENT(btree_node, btree_node_merge,
406         TP_PROTO(struct bch_fs *c, struct btree *b),
407         TP_ARGS(c, b)
408 );
409
410 DEFINE_EVENT(btree_node, btree_node_split,
411         TP_PROTO(struct bch_fs *c, struct btree *b),
412         TP_ARGS(c, b)
413 );
414
415 DEFINE_EVENT(btree_node, btree_node_rewrite,
416         TP_PROTO(struct bch_fs *c, struct btree *b),
417         TP_ARGS(c, b)
418 );
419
420 DEFINE_EVENT(btree_node, btree_node_set_root,
421         TP_PROTO(struct bch_fs *c, struct btree *b),
422         TP_ARGS(c, b)
423 );
424
425 TRACE_EVENT(btree_path_relock_fail,
426         TP_PROTO(struct btree_trans *trans,
427                  unsigned long caller_ip,
428                  struct btree_path *path,
429                  unsigned level),
430         TP_ARGS(trans, caller_ip, path, level),
431
432         TP_STRUCT__entry(
433                 __array(char,                   trans_fn, 32    )
434                 __field(unsigned long,          caller_ip       )
435                 __field(u8,                     btree_id        )
436                 __field(u8,                     level           )
437                 TRACE_BPOS_entries(pos)
438                 __array(char,                   node, 24        )
439                 __field(u8,                     self_read_count )
440                 __field(u8,                     self_intent_count)
441                 __field(u8,                     read_count      )
442                 __field(u8,                     intent_count    )
443                 __field(u32,                    iter_lock_seq   )
444                 __field(u32,                    node_lock_seq   )
445         ),
446
447         TP_fast_assign(
448                 struct btree *b = btree_path_node(path, level);
449                 struct six_lock_count c;
450
451                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
452                 __entry->caller_ip              = caller_ip;
453                 __entry->btree_id               = path->btree_id;
454                 __entry->level                  = path->level;
455                 TRACE_BPOS_assign(pos, path->pos);
456
457                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
458                 __entry->self_read_count        = c.n[SIX_LOCK_read];
459                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
460
461                 if (IS_ERR(b)) {
462                         strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
463                 } else {
464                         c = six_lock_counts(&path->l[level].b->c.lock);
465                         __entry->read_count     = c.n[SIX_LOCK_read];
466                         __entry->intent_count   = c.n[SIX_LOCK_intent];
467                         scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
468                 }
469                 __entry->iter_lock_seq          = path->l[level].lock_seq;
470                 __entry->node_lock_seq          = is_btree_node(path, level)
471                         ? six_lock_seq(&path->l[level].b->c.lock)
472                         : 0;
473         ),
474
475         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
476                   __entry->trans_fn,
477                   (void *) __entry->caller_ip,
478                   bch2_btree_id_str(__entry->btree_id),
479                   __entry->pos_inode,
480                   __entry->pos_offset,
481                   __entry->pos_snapshot,
482                   __entry->level,
483                   __entry->node,
484                   __entry->self_read_count,
485                   __entry->self_intent_count,
486                   __entry->read_count,
487                   __entry->intent_count,
488                   __entry->iter_lock_seq,
489                   __entry->node_lock_seq)
490 );
491
492 TRACE_EVENT(btree_path_upgrade_fail,
493         TP_PROTO(struct btree_trans *trans,
494                  unsigned long caller_ip,
495                  struct btree_path *path,
496                  unsigned level),
497         TP_ARGS(trans, caller_ip, path, level),
498
499         TP_STRUCT__entry(
500                 __array(char,                   trans_fn, 32    )
501                 __field(unsigned long,          caller_ip       )
502                 __field(u8,                     btree_id        )
503                 __field(u8,                     level           )
504                 TRACE_BPOS_entries(pos)
505                 __field(u8,                     locked          )
506                 __field(u8,                     self_read_count )
507                 __field(u8,                     self_intent_count)
508                 __field(u8,                     read_count      )
509                 __field(u8,                     intent_count    )
510                 __field(u32,                    iter_lock_seq   )
511                 __field(u32,                    node_lock_seq   )
512         ),
513
514         TP_fast_assign(
515                 struct six_lock_count c;
516
517                 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
518                 __entry->caller_ip              = caller_ip;
519                 __entry->btree_id               = path->btree_id;
520                 __entry->level                  = level;
521                 TRACE_BPOS_assign(pos, path->pos);
522                 __entry->locked                 = btree_node_locked(path, level);
523
524                 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
525                 __entry->self_read_count        = c.n[SIX_LOCK_read];
526                 __entry->self_intent_count      = c.n[SIX_LOCK_intent];
527                 c = six_lock_counts(&path->l[level].b->c.lock);
528                 __entry->read_count             = c.n[SIX_LOCK_read];
529                 __entry->intent_count           = c.n[SIX_LOCK_intent];
530                 __entry->iter_lock_seq          = path->l[level].lock_seq;
531                 __entry->node_lock_seq          = is_btree_node(path, level)
532                         ? six_lock_seq(&path->l[level].b->c.lock)
533                         : 0;
534         ),
535
536         TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
537                   __entry->trans_fn,
538                   (void *) __entry->caller_ip,
539                   bch2_btree_id_str(__entry->btree_id),
540                   __entry->pos_inode,
541                   __entry->pos_offset,
542                   __entry->pos_snapshot,
543                   __entry->level,
544                   __entry->locked,
545                   __entry->self_read_count,
546                   __entry->self_intent_count,
547                   __entry->read_count,
548                   __entry->intent_count,
549                   __entry->iter_lock_seq,
550                   __entry->node_lock_seq)
551 );
552
553 /* Garbage collection */
554
555 DEFINE_EVENT(bch_fs, gc_gens_start,
556         TP_PROTO(struct bch_fs *c),
557         TP_ARGS(c)
558 );
559
560 DEFINE_EVENT(bch_fs, gc_gens_end,
561         TP_PROTO(struct bch_fs *c),
562         TP_ARGS(c)
563 );
564
565 /* Allocator */
566
567 DECLARE_EVENT_CLASS(bucket_alloc,
568         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
569                  u64 bucket,
570                  u64 free,
571                  u64 avail,
572                  u64 copygc_wait_amount,
573                  s64 copygc_waiting_for,
574                  struct bucket_alloc_state *s,
575                  bool nonblocking,
576                  const char *err),
577         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
578                 copygc_wait_amount, copygc_waiting_for,
579                 s, nonblocking, err),
580
581         TP_STRUCT__entry(
582                 __field(u8,                     dev                     )
583                 __array(char,   reserve,        16                      )
584                 __field(u64,                    bucket  )
585                 __field(u64,                    free                    )
586                 __field(u64,                    avail                   )
587                 __field(u64,                    copygc_wait_amount      )
588                 __field(s64,                    copygc_waiting_for      )
589                 __field(u64,                    seen                    )
590                 __field(u64,                    open                    )
591                 __field(u64,                    need_journal_commit     )
592                 __field(u64,                    nouse                   )
593                 __field(bool,                   nonblocking             )
594                 __field(u64,                    nocow                   )
595                 __array(char,                   err,    32              )
596         ),
597
598         TP_fast_assign(
599                 __entry->dev            = ca->dev_idx;
600                 strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
601                 __entry->bucket         = bucket;
602                 __entry->free           = free;
603                 __entry->avail          = avail;
604                 __entry->copygc_wait_amount     = copygc_wait_amount;
605                 __entry->copygc_waiting_for     = copygc_waiting_for;
606                 __entry->seen           = s->buckets_seen;
607                 __entry->open           = s->skipped_open;
608                 __entry->need_journal_commit = s->skipped_need_journal_commit;
609                 __entry->nouse          = s->skipped_nouse;
610                 __entry->nonblocking    = nonblocking;
611                 __entry->nocow          = s->skipped_nocow;
612                 strscpy(__entry->err, err, sizeof(__entry->err));
613         ),
614
615         TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
616                   __entry->reserve,
617                   __entry->dev,
618                   __entry->bucket,
619                   __entry->free,
620                   __entry->avail,
621                   __entry->copygc_wait_amount,
622                   __entry->copygc_waiting_for,
623                   __entry->seen,
624                   __entry->open,
625                   __entry->need_journal_commit,
626                   __entry->nouse,
627                   __entry->nocow,
628                   __entry->nonblocking,
629                   __entry->err)
630 );
631
632 DEFINE_EVENT(bucket_alloc, bucket_alloc,
633         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
634                  u64 bucket,
635                  u64 free,
636                  u64 avail,
637                  u64 copygc_wait_amount,
638                  s64 copygc_waiting_for,
639                  struct bucket_alloc_state *s,
640                  bool nonblocking,
641                  const char *err),
642         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
643                 copygc_wait_amount, copygc_waiting_for,
644                 s, nonblocking, err)
645 );
646
647 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
648         TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
649                  u64 bucket,
650                  u64 free,
651                  u64 avail,
652                  u64 copygc_wait_amount,
653                  s64 copygc_waiting_for,
654                  struct bucket_alloc_state *s,
655                  bool nonblocking,
656                  const char *err),
657         TP_ARGS(ca, alloc_reserve, bucket, free, avail,
658                 copygc_wait_amount, copygc_waiting_for,
659                 s, nonblocking, err)
660 );
661
662 TRACE_EVENT(discard_buckets,
663         TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
664                  u64 need_journal_commit, u64 discarded, const char *err),
665         TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
666
667         TP_STRUCT__entry(
668                 __field(dev_t,          dev                     )
669                 __field(u64,            seen                    )
670                 __field(u64,            open                    )
671                 __field(u64,            need_journal_commit     )
672                 __field(u64,            discarded               )
673                 __array(char,           err,    16              )
674         ),
675
676         TP_fast_assign(
677                 __entry->dev                    = c->dev;
678                 __entry->seen                   = seen;
679                 __entry->open                   = open;
680                 __entry->need_journal_commit    = need_journal_commit;
681                 __entry->discarded              = discarded;
682                 strscpy(__entry->err, err, sizeof(__entry->err));
683         ),
684
685         TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
686                   MAJOR(__entry->dev), MINOR(__entry->dev),
687                   __entry->seen,
688                   __entry->open,
689                   __entry->need_journal_commit,
690                   __entry->discarded,
691                   __entry->err)
692 );
693
694 TRACE_EVENT(bucket_invalidate,
695         TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
696         TP_ARGS(c, dev, bucket, sectors),
697
698         TP_STRUCT__entry(
699                 __field(dev_t,          dev                     )
700                 __field(u32,            dev_idx                 )
701                 __field(u32,            sectors                 )
702                 __field(u64,            bucket                  )
703         ),
704
705         TP_fast_assign(
706                 __entry->dev            = c->dev;
707                 __entry->dev_idx        = dev;
708                 __entry->sectors        = sectors;
709                 __entry->bucket         = bucket;
710         ),
711
712         TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
713                   MAJOR(__entry->dev), MINOR(__entry->dev),
714                   __entry->dev_idx, __entry->bucket,
715                   __entry->sectors)
716 );
717
718 /* Moving IO */
719
720 TRACE_EVENT(bucket_evacuate,
721         TP_PROTO(struct bch_fs *c, struct bpos *bucket),
722         TP_ARGS(c, bucket),
723
724         TP_STRUCT__entry(
725                 __field(dev_t,          dev                     )
726                 __field(u32,            dev_idx                 )
727                 __field(u64,            bucket                  )
728         ),
729
730         TP_fast_assign(
731                 __entry->dev            = c->dev;
732                 __entry->dev_idx        = bucket->inode;
733                 __entry->bucket         = bucket->offset;
734         ),
735
736         TP_printk("%d:%d %u:%llu",
737                   MAJOR(__entry->dev), MINOR(__entry->dev),
738                   __entry->dev_idx, __entry->bucket)
739 );
740
741 DEFINE_EVENT(str, move_extent,
742         TP_PROTO(struct bch_fs *c, const char *k),
743         TP_ARGS(c, k)
744 );
745
746 DEFINE_EVENT(str, move_extent_read,
747         TP_PROTO(struct bch_fs *c, const char *k),
748         TP_ARGS(c, k)
749 );
750
751 DEFINE_EVENT(str, move_extent_write,
752         TP_PROTO(struct bch_fs *c, const char *k),
753         TP_ARGS(c, k)
754 );
755
756 DEFINE_EVENT(str, move_extent_finish,
757         TP_PROTO(struct bch_fs *c, const char *k),
758         TP_ARGS(c, k)
759 );
760
761 TRACE_EVENT(move_extent_fail,
762         TP_PROTO(struct bch_fs *c, const char *msg),
763         TP_ARGS(c, msg),
764
765         TP_STRUCT__entry(
766                 __field(dev_t,          dev                     )
767                 __string(msg,           msg                     )
768         ),
769
770         TP_fast_assign(
771                 __entry->dev            = c->dev;
772                 __assign_str(msg, msg);
773         ),
774
775         TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
776 );
777
778 DEFINE_EVENT(str, move_extent_start_fail,
779         TP_PROTO(struct bch_fs *c, const char *str),
780         TP_ARGS(c, str)
781 );
782
783 TRACE_EVENT(move_data,
784         TP_PROTO(struct bch_fs *c,
785                  struct bch_move_stats *stats),
786         TP_ARGS(c, stats),
787
788         TP_STRUCT__entry(
789                 __field(dev_t,          dev             )
790                 __field(u64,            keys_moved      )
791                 __field(u64,            keys_raced      )
792                 __field(u64,            sectors_seen    )
793                 __field(u64,            sectors_moved   )
794                 __field(u64,            sectors_raced   )
795         ),
796
797         TP_fast_assign(
798                 __entry->dev            = c->dev;
799                 __entry->keys_moved     = atomic64_read(&stats->keys_moved);
800                 __entry->keys_raced     = atomic64_read(&stats->keys_raced);
801                 __entry->sectors_seen   = atomic64_read(&stats->sectors_seen);
802                 __entry->sectors_moved  = atomic64_read(&stats->sectors_moved);
803                 __entry->sectors_raced  = atomic64_read(&stats->sectors_raced);
804         ),
805
806         TP_printk("%d,%d keys moved %llu raced %llu"
807                   "sectors seen %llu moved %llu raced %llu",
808                   MAJOR(__entry->dev), MINOR(__entry->dev),
809                   __entry->keys_moved,
810                   __entry->keys_raced,
811                   __entry->sectors_seen,
812                   __entry->sectors_moved,
813                   __entry->sectors_raced)
814 );
815
/*
 * evacuate_bucket - outcome of evacuating one bucket
 *
 * @bucket: bucket position — inode is the member device index, offset the
 *          bucket number within that device
 * @sectors / @bucket_size: sectors processed vs. total bucket size
 * @fragmentation: fragmentation metric for the bucket (units defined by
 *                 the caller — confirm at call site)
 * @ret: return code of the evacuate operation
 */
TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 u64 fragmentation, int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		member		)
		__field(u64,		bucket		)
		__field(u32,		sectors		)
		__field(u32,		bucket_size	)
		__field(u64,		fragmentation	)
		__field(int,		ret		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->member			= bucket->inode;
		__entry->bucket			= bucket->offset;
		__entry->sectors		= sectors;
		__entry->bucket_size		= bucket_size;
		__entry->fragmentation		= fragmentation;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->fragmentation, __entry->ret)
);
848
849 TRACE_EVENT(copygc,
850         TP_PROTO(struct bch_fs *c,
851                  u64 sectors_moved, u64 sectors_not_moved,
852                  u64 buckets_moved, u64 buckets_not_moved),
853         TP_ARGS(c,
854                 sectors_moved, sectors_not_moved,
855                 buckets_moved, buckets_not_moved),
856
857         TP_STRUCT__entry(
858                 __field(dev_t,          dev                     )
859                 __field(u64,            sectors_moved           )
860                 __field(u64,            sectors_not_moved       )
861                 __field(u64,            buckets_moved           )
862                 __field(u64,            buckets_not_moved       )
863         ),
864
865         TP_fast_assign(
866                 __entry->dev                    = c->dev;
867                 __entry->sectors_moved          = sectors_moved;
868                 __entry->sectors_not_moved      = sectors_not_moved;
869                 __entry->buckets_moved          = buckets_moved;
870                 __entry->buckets_not_moved = buckets_moved;
871         ),
872
873         TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
874                   MAJOR(__entry->dev), MINOR(__entry->dev),
875                   __entry->sectors_moved, __entry->sectors_not_moved,
876                   __entry->buckets_moved, __entry->buckets_not_moved)
877 );
878
879 TRACE_EVENT(copygc_wait,
880         TP_PROTO(struct bch_fs *c,
881                  u64 wait_amount, u64 until),
882         TP_ARGS(c, wait_amount, until),
883
884         TP_STRUCT__entry(
885                 __field(dev_t,          dev                     )
886                 __field(u64,            wait_amount             )
887                 __field(u64,            until                   )
888         ),
889
890         TP_fast_assign(
891                 __entry->dev            = c->dev;
892                 __entry->wait_amount    = wait_amount;
893                 __entry->until          = until;
894         ),
895
896         TP_printk("%d,%u waiting for %llu sectors until %llu",
897                   MAJOR(__entry->dev), MINOR(__entry->dev),
898                   __entry->wait_amount, __entry->until)
899 );
900
901 /* btree transactions: */
902
/*
 * transaction_event - common class for btree-transaction tracepoints
 *
 * Captures the transaction entry-point function name (trans->fn,
 * truncated to fit the 32-byte array) and the caller's instruction
 * pointer, printed symbolically via %pS.
 */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
920
/* A btree transaction committed. */
DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Transaction restart triggered by fault injection. */
DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
932
/*
 * trans_restart_split_race - transaction restarted after racing with a
 * btree node split
 *
 * Records the node's level, how many blocks of it are written out of the
 * total per-node blocks, and how many u64s of key space remain in it.
 */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch_btree_keys_u64s_remaining(trans->c, b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
963
/* Transaction blocked waiting on journal reclaim. */
DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
969
/*
 * trans_restart_journal_preres_get - transaction restarted while getting
 * a journal pre-reservation; @flags are the reservation flags passed by
 * the caller (printed in hex).
 */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
992
/*
 * Transaction-restart / traversal events that need no extra payload
 * beyond the transaction_event class (fn + caller ip).
 */
DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1016
/*
 * transaction_restart_iter - class for restarts tied to a btree path
 *
 * Extends the fn/caller-ip pair with the path's btree id and position
 * (inode:offset:snapshot) at the time of the restart.
 */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1045
/* Restart: the btree node at this path was reused out from under us. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart: the btree node at this path was split. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1059
/* Forward declaration: only a pointer is needed in TP_PROTO below. */
struct get_locks_fail;

/*
 * trans_restart_upgrade - transaction restarted because a lock upgrade
 * on @path failed
 *
 * @f describes the failure: f->l is the level that could not be locked,
 * f->b the node at that level (may be an error pointer or NULL, in which
 * case node_seq is recorded as 0).
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		__field(u32,			path_alloc_seq	)
		__field(u32,			downgrade_seq)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		/* f->b may be NULL/ERR_PTR when no node was obtained */
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		__entry->path_alloc_seq		= path->alloc_seq;
		__entry->downgrade_seq		= path->downgrade_seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq,
		  __entry->path_alloc_seq,
		  __entry->downgrade_seq)
);
1114
/*
 * Relock/traverse restart events. All record the btree path (btree id +
 * pos) via the transaction_restart_iter class, except
 * trans_restart_key_cache_upgrade, which carries only fn + caller ip.
 */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1183
/* Restart: taking this lock would deadlock. */
DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart: deadlock-cycle detection hit its recursion limit. */
DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1195
/*
 * trans_restart_would_deadlock_write - restart on a would-deadlock write
 * lock; records only the transaction entry-point name (no caller ip).
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1210
/*
 * trans_restart_mem_realloced - transaction restarted because its
 * preallocated memory was reallocated; @bytes is the new size requested.
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1234
/*
 * trans_restart_key_cache_key_realloced - restart after a key-cache key
 * was reallocated, growing from @old_u64s to @new_u64s.
 *
 * NOTE(review): btree_id is declared as enum btree_id here while sibling
 * events use u8; changing it would alter the trace record layout, so it
 * is left as-is.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1272
/*
 * path_downgrade - a btree path's locks were downgraded
 *
 * Captures locks_want before (@old_locks_want) and after
 * (path->locks_want at fire time), plus the path's btree and position.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1308
/* Restart: transaction had to flush the btree write buffer. */
DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1314
/*
 * write_buffer_flush - btree write buffer flushed
 *
 * @nr of @size buffered entries were processed; @skipped were skipped
 * and @fast took the fast path. @trans is accepted for call-site
 * uniformity but nothing from it is recorded.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1336
/*
 * write_buffer_flush_sync - a synchronous write-buffer flush was
 * requested; records the transaction fn and caller ip.
 */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1353
/*
 * write_buffer_flush_slowpath - @slowpath of @total write-buffer entries
 * required the slow path during a flush. @trans is unused by the entry.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
1370
/* An extent was processed by rebalance; str holds details formatted by the caller. */
DEFINE_EVENT(str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* A data update occurred; str holds details formatted by the caller. */
DEFINE_EVENT(str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1380
1381 #endif /* _TRACE_BCACHEFS_H */
1382
1383 /* This part must be outside protection */
1384 #undef TRACE_INCLUDE_PATH
1385 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1386
1387 #undef TRACE_INCLUDE_FILE
1388 #define TRACE_INCLUDE_FILE trace
1389
1390 #include <trace/define_trace.h>