]> git.sesse.net Git - bcachefs-tools-debian/blobdiff - include/trace/events/bcachefs.h
Update bcachefs sources to 3e93567c51 bcachefs: Switch to local_clock() for fastpath...
[bcachefs-tools-debian] / include / trace / events / bcachefs.h
index 06cb5ff33b32a44d6ae736c9ccda94260ebf88f7..d3d9e965e7020efb96b4eff9a03f89a19d3a88f6 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM bcachefs
 
@@ -6,21 +7,29 @@
 
 #include <linux/tracepoint.h>
 
+#define TRACE_BPOS_entries(name)                               \
+       __field(u64,                    name##_inode    )       \
+       __field(u64,                    name##_offset   )       \
+       __field(u32,                    name##_snapshot )
+
+#define TRACE_BPOS_assign(dst, src)                            \
+       __entry->dst##_inode            = (src).inode;          \
+       __entry->dst##_offset           = (src).offset;         \
+       __entry->dst##_snapshot         = (src).snapshot
+
 DECLARE_EVENT_CLASS(bpos,
-       TP_PROTO(struct bpos p),
+       TP_PROTO(const struct bpos *p),
        TP_ARGS(p),
 
        TP_STRUCT__entry(
-               __field(u64,    inode                           )
-               __field(u64,    offset                          )
+               TRACE_BPOS_entries(p)
        ),
 
        TP_fast_assign(
-               __entry->inode  = p.inode;
-               __entry->offset = p.offset;
+               TRACE_BPOS_assign(p, *p);
        ),
 
-       TP_printk("%llu:%llu", __entry->inode, __entry->offset)
+       TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
 );
 
 DECLARE_EVENT_CLASS(bkey,
@@ -43,21 +52,29 @@ DECLARE_EVENT_CLASS(bkey,
                  __entry->offset, __entry->size)
 );
 
-DECLARE_EVENT_CLASS(bch_dev,
-       TP_PROTO(struct bch_dev *ca),
-       TP_ARGS(ca),
+DECLARE_EVENT_CLASS(btree_node,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b),
 
        TP_STRUCT__entry(
-               __array(char,           uuid,   16      )
-               __field(unsigned,       tier            )
+               __field(dev_t,          dev                     )
+               __field(u8,             level                   )
+               __field(u8,             btree_id                )
+               TRACE_BPOS_entries(pos)
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, ca->uuid.b, 16);
-               __entry->tier = ca->mi.tier;
+               __entry->dev            = c->dev;
+               __entry->level          = b->c.level;
+               __entry->btree_id       = b->c.btree_id;
+               TRACE_BPOS_assign(pos, b->key.k.p);
        ),
 
-       TP_printk("%pU tier %u", __entry->uuid, __entry->tier)
+       TP_printk("%d,%d %u %s %llu:%llu:%u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->level,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
 );
 
 DECLARE_EVENT_CLASS(bch_fs,
@@ -65,14 +82,14 @@ DECLARE_EVENT_CLASS(bch_fs,
        TP_ARGS(c),
 
        TP_STRUCT__entry(
-               __array(char,           uuid,   16 )
+               __field(dev_t,          dev                     )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+               __entry->dev            = c->dev;
        ),
 
-       TP_printk("%pU", __entry->uuid)
+       TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
 );
 
 DECLARE_EVENT_CLASS(bio,
@@ -87,10 +104,10 @@ DECLARE_EVENT_CLASS(bio,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio->bi_bdev ? bio_dev(bio) : 0;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
        ),
 
        TP_printk("%d,%d  %s %llu + %u",
@@ -98,26 +115,29 @@ DECLARE_EVENT_CLASS(bio,
                  (unsigned long long)__entry->sector, __entry->nr_sector)
 );
 
-DECLARE_EVENT_CLASS(page_alloc_fail,
-       TP_PROTO(struct bch_fs *c, u64 size),
-       TP_ARGS(c, size),
+/* super-io.c: */
+TRACE_EVENT(write_super,
+       TP_PROTO(struct bch_fs *c, unsigned long ip),
+       TP_ARGS(c, ip),
 
        TP_STRUCT__entry(
-               __array(char,           uuid,   16      )
-               __field(u64,            size            )
+               __field(dev_t,          dev     )
+               __field(unsigned long,  ip      )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->size = size;
+               __entry->dev            = c->dev;
+               __entry->ip             = ip;
        ),
 
-       TP_printk("%pU size %llu", __entry->uuid, __entry->size)
+       TP_printk("%d,%d for %pS",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (void *) __entry->ip)
 );
 
 /* io.c: */
 
-DEFINE_EVENT(bio, read_split,
+DEFINE_EVENT(bio, read_promote,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
 );
@@ -127,42 +147,19 @@ DEFINE_EVENT(bio, read_bounce,
        TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bio, read_retry,
+DEFINE_EVENT(bio, read_split,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bio, promote,
+DEFINE_EVENT(bio, read_retry,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
 );
 
-TRACE_EVENT(write_throttle,
-       TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay),
-       TP_ARGS(c, inode, bio, delay),
-
-       TP_STRUCT__entry(
-               __array(char,           uuid,   16              )
-               __field(u64,            inode                   )
-               __field(sector_t,       sector                  )
-               __field(unsigned int,   nr_sector               )
-               __array(char,           rwbs,   6               )
-               __field(u64,            delay                   )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->inode          = inode;
-               __entry->sector         = bio->bi_iter.bi_sector;
-               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
-               __entry->delay          = delay;
-       ),
-
-       TP_printk("%pU inode %llu  %s %llu + %u delay %llu",
-                 __entry->uuid, __entry->inode,
-                 __entry->rwbs, (unsigned long long)__entry->sector,
-                 __entry->nr_sector, __entry->delay)
+DEFINE_EVENT(bio, read_reuse_race,
+       TP_PROTO(struct bio *bio),
+       TP_ARGS(bio)
 );
 
 /* Journal */
@@ -182,53 +179,144 @@ DEFINE_EVENT(bio, journal_write,
        TP_ARGS(bio)
 );
 
+TRACE_EVENT(journal_reclaim_start,
+       TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
+                u64 min_nr, u64 min_key_cache,
+                u64 prereserved, u64 prereserved_total,
+                u64 btree_cache_dirty, u64 btree_cache_total,
+                u64 btree_key_cache_dirty, u64 btree_key_cache_total),
+       TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
+               btree_cache_dirty, btree_cache_total,
+               btree_key_cache_dirty, btree_key_cache_total),
+
+       TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
+               __field(bool,           direct                  )
+               __field(bool,           kicked                  )
+               __field(u64,            min_nr                  )
+               __field(u64,            min_key_cache           )
+               __field(u64,            prereserved             )
+               __field(u64,            prereserved_total       )
+               __field(u64,            btree_cache_dirty       )
+               __field(u64,            btree_cache_total       )
+               __field(u64,            btree_key_cache_dirty   )
+               __field(u64,            btree_key_cache_total   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev                    = c->dev;
+               __entry->direct                 = direct;
+               __entry->kicked                 = kicked;
+               __entry->min_nr                 = min_nr;
+               __entry->min_key_cache          = min_key_cache;
+               __entry->prereserved            = prereserved;
+               __entry->prereserved_total      = prereserved_total;
+               __entry->btree_cache_dirty      = btree_cache_dirty;
+               __entry->btree_cache_total      = btree_cache_total;
+               __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
+               __entry->btree_key_cache_total  = btree_key_cache_total;
+       ),
+
+       TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->direct,
+                 __entry->kicked,
+                 __entry->min_nr,
+                 __entry->min_key_cache,
+                 __entry->prereserved,
+                 __entry->prereserved_total,
+                 __entry->btree_cache_dirty,
+                 __entry->btree_cache_total,
+                 __entry->btree_key_cache_dirty,
+                 __entry->btree_key_cache_total)
+);
+
+TRACE_EVENT(journal_reclaim_finish,
+       TP_PROTO(struct bch_fs *c, u64 nr_flushed),
+       TP_ARGS(c, nr_flushed),
+
+       TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
+               __field(u64,            nr_flushed              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = c->dev;
+               __entry->nr_flushed     = nr_flushed;
+       ),
+
+       TP_printk("%d,%d flushed %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->nr_flushed)
+);
+
 /* bset.c: */
 
 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
-       TP_PROTO(struct bpos p),
+       TP_PROTO(const struct bpos *p),
        TP_ARGS(p)
 );
 
-/* Btree */
+/* Btree cache: */
 
-DECLARE_EVENT_CLASS(btree_node,
-       TP_PROTO(struct bch_fs *c, struct btree *b),
-       TP_ARGS(c, b),
+TRACE_EVENT(btree_cache_scan,
+       TP_PROTO(long nr_to_scan, long can_free, long ret),
+       TP_ARGS(nr_to_scan, can_free, ret),
 
        TP_STRUCT__entry(
-               __array(char,           uuid,           16      )
-               __field(u64,            bucket                  )
-               __field(u8,             level                   )
-               __field(u8,             id                      )
-               __field(u32,            inode                   )
-               __field(u64,            offset                  )
+               __field(long,   nr_to_scan              )
+               __field(long,   can_free                )
+               __field(long,   ret                     )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->bucket         = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
-               __entry->level          = b->level;
-               __entry->id             = b->btree_id;
-               __entry->inode          = b->key.k.p.inode;
-               __entry->offset         = b->key.k.p.offset;
+               __entry->nr_to_scan     = nr_to_scan;
+               __entry->can_free       = can_free;
+               __entry->ret            = ret;
        ),
 
-       TP_printk("%pU bucket %llu(%u) id %u: %u:%llu",
-                 __entry->uuid, __entry->bucket, __entry->level, __entry->id,
-                 __entry->inode, __entry->offset)
+       TP_printk("scanned for %li nodes, can free %li, ret %li",
+                 __entry->nr_to_scan, __entry->can_free, __entry->ret)
 );
 
-DEFINE_EVENT(btree_node, btree_read,
+DEFINE_EVENT(btree_node, btree_cache_reap,
        TP_PROTO(struct bch_fs *c, struct btree *b),
        TP_ARGS(c, b)
 );
 
-TRACE_EVENT(btree_write,
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
+       TP_PROTO(struct bch_fs *c),
+       TP_ARGS(c)
+);
+
+/* Btree */
+
+DEFINE_EVENT(btree_node, btree_node_read,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b)
+);
+
+TRACE_EVENT(btree_node_write,
        TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
        TP_ARGS(b, bytes, sectors),
 
        TP_STRUCT__entry(
-               __field(enum bkey_type, type)
+               __field(enum btree_node_type,   type)
                __field(unsigned,       bytes                   )
                __field(unsigned,       sectors                 )
        ),
@@ -253,496 +341,758 @@ DEFINE_EVENT(btree_node, btree_node_free,
        TP_ARGS(c, b)
 );
 
-TRACE_EVENT(btree_node_reap,
-       TP_PROTO(struct bch_fs *c, struct btree *b, int ret),
-       TP_ARGS(c, b, ret),
+TRACE_EVENT(btree_reserve_get_fail,
+       TP_PROTO(const char *trans_fn,
+                unsigned long caller_ip,
+                size_t required),
+       TP_ARGS(trans_fn, caller_ip, required),
 
        TP_STRUCT__entry(
-               __field(u64,                    bucket          )
-               __field(int,                    ret             )
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(size_t,                 required        )
        ),
 
        TP_fast_assign(
-               __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
-               __entry->ret = ret;
+               strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip      = caller_ip;
+               __entry->required       = required;
        ),
 
-       TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
+       TP_printk("%s %pS required %zu",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 __entry->required)
 );
 
-DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c),
+DEFINE_EVENT(btree_node, btree_node_compact,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_merge,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_split,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_rewrite,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_set_root,
+       TP_PROTO(struct bch_fs *c, struct btree *b),
+       TP_ARGS(c, b)
+);
+
+TRACE_EVENT(btree_path_relock_fail,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path,
+                unsigned level),
+       TP_ARGS(trans, caller_ip, path, level),
 
        TP_STRUCT__entry(
-               __array(char,                   uuid,   16      )
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(u8,                     btree_id        )
+               __field(u8,                     level           )
+               TRACE_BPOS_entries(pos)
+               __array(char,                   node, 24        )
+               __field(u32,                    iter_lock_seq   )
+               __field(u32,                    node_lock_seq   )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+               struct btree *b = btree_path_node(path, level);
+
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+               __entry->btree_id               = path->btree_id;
+               __entry->level                  = path->level;
+               TRACE_BPOS_assign(pos, path->pos);
+               if (IS_ERR(b))
+                       strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
+               else
+                       scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+               __entry->iter_lock_seq          = path->l[level].lock_seq;
+               __entry->node_lock_seq          = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
        ),
 
-       TP_printk("%pU", __entry->uuid)
-);
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->pos_inode,
+                 __entry->pos_offset,
+                 __entry->pos_snapshot,
+                 __entry->level,
+                 __entry->node,
+                 __entry->iter_lock_seq,
+                 __entry->node_lock_seq)
+);
+
+TRACE_EVENT(btree_path_upgrade_fail,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path,
+                unsigned level),
+       TP_ARGS(trans, caller_ip, path, level),
 
-DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
-);
+       TP_STRUCT__entry(
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(u8,                     btree_id        )
+               __field(u8,                     level           )
+               TRACE_BPOS_entries(pos)
+               __field(u8,                     locked          )
+               __field(u8,                     self_read_count )
+               __field(u8,                     self_intent_count)
+               __field(u8,                     read_count      )
+               __field(u8,                     intent_count    )
+               __field(u32,                    iter_lock_seq   )
+               __field(u32,                    node_lock_seq   )
+       ),
 
-DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+       TP_fast_assign(
+               struct six_lock_count c;
+
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+               __entry->btree_id               = path->btree_id;
+               __entry->level                  = level;
+               TRACE_BPOS_assign(pos, path->pos);
+               __entry->locked                 = btree_node_locked(path, level);
+
+               c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
+               __entry->self_read_count        = c.n[SIX_LOCK_read];
+               __entry->self_intent_count      = c.n[SIX_LOCK_intent];
+               c = six_lock_counts(&path->l[level].b->c.lock);
+               __entry->read_count             = c.n[SIX_LOCK_read];
+               __entry->intent_count           = c.n[SIX_LOCK_read];
+               __entry->iter_lock_seq          = path->l[level].lock_seq;
+               __entry->node_lock_seq          = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
+       ),
+
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->pos_inode,
+                 __entry->pos_offset,
+                 __entry->pos_snapshot,
+                 __entry->level,
+                 __entry->locked,
+                 __entry->self_read_count,
+                 __entry->self_intent_count,
+                 __entry->read_count,
+                 __entry->intent_count,
+                 __entry->iter_lock_seq,
+                 __entry->node_lock_seq)
 );
 
-DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
+/* Garbage collection */
+
+DEFINE_EVENT(bch_fs, gc_gens_start,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
 );
 
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
+DEFINE_EVENT(bch_fs, gc_gens_end,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)
 );
 
-TRACE_EVENT(btree_reserve_get_fail,
-       TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
-       TP_ARGS(c, required, cl),
+/* Allocator */
+
+TRACE_EVENT(bucket_alloc,
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
+                bool user, u64 bucket),
+       TP_ARGS(ca, alloc_reserve, user, bucket),
 
        TP_STRUCT__entry(
-               __array(char,                   uuid,   16      )
-               __field(size_t,                 required        )
-               __field(struct closure *,       cl              )
+               __field(dev_t,                  dev     )
+               __array(char,   reserve,        16      )
+               __field(bool,                   user    )
+               __field(u64,                    bucket  )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->required = required;
-               __entry->cl = cl;
+               __entry->dev            = ca->dev;
+               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+               __entry->user           = user;
+               __entry->bucket         = bucket;
        ),
 
-       TP_printk("%pU required %zu by %p", __entry->uuid,
-                 __entry->required, __entry->cl)
-);
+       TP_printk("%d,%d reserve %s user %u bucket %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->reserve,
+                 __entry->user,
+                 __entry->bucket)
+);
+
+TRACE_EVENT(bucket_alloc_fail,
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
+                u64 free,
+                u64 avail,
+                u64 copygc_wait_amount,
+                s64 copygc_waiting_for,
+                u64 seen,
+                u64 open,
+                u64 need_journal_commit,
+                u64 nouse,
+                bool nonblocking,
+                const char *err),
+       TP_ARGS(ca, alloc_reserve, free, avail, copygc_wait_amount, copygc_waiting_for,
+               seen, open, need_journal_commit, nouse, nonblocking, err),
 
-TRACE_EVENT(btree_insert_key,
-       TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
-       TP_ARGS(c, b, k),
+       TP_STRUCT__entry(
+               __field(dev_t,                  dev                     )
+               __array(char,   reserve,        16                      )
+               __field(u64,                    free                    )
+               __field(u64,                    avail                   )
+               __field(u64,                    copygc_wait_amount      )
+               __field(s64,                    copygc_waiting_for      )
+               __field(u64,                    seen                    )
+               __field(u64,                    open                    )
+               __field(u64,                    need_journal_commit     )
+               __field(u64,                    nouse                   )
+               __field(bool,                   nonblocking             )
+               __array(char,                   err,    32              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ca->dev;
+               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+               __entry->free           = free;
+               __entry->avail          = avail;
+               __entry->copygc_wait_amount     = copygc_wait_amount;
+               __entry->copygc_waiting_for     = copygc_waiting_for;
+               __entry->seen           = seen;
+               __entry->open           = open;
+               __entry->need_journal_commit = need_journal_commit;
+               __entry->nouse          = nouse;
+               __entry->nonblocking    = nonblocking;
+               strlcpy(__entry->err, err, sizeof(__entry->err));
+       ),
+
+       TP_printk("%d,%d reserve %s free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->reserve,
+                 __entry->free,
+                 __entry->avail,
+                 __entry->copygc_wait_amount,
+                 __entry->copygc_waiting_for,
+                 __entry->seen,
+                 __entry->open,
+                 __entry->need_journal_commit,
+                 __entry->nouse,
+                 __entry->nonblocking,
+                 __entry->err)
+);
+
+TRACE_EVENT(discard_buckets,
+       TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
+                u64 need_journal_commit, u64 discarded, const char *err),
+       TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
 
        TP_STRUCT__entry(
-               __field(u64,            b_bucket                )
-               __field(u64,            b_offset                )
-               __field(u64,            offset                  )
-               __field(u32,            b_inode                 )
-               __field(u32,            inode                   )
-               __field(u32,            size                    )
-               __field(u8,             level                   )
-               __field(u8,             id                      )
+               __field(dev_t,          dev                     )
+               __field(u64,            seen                    )
+               __field(u64,            open                    )
+               __field(u64,            need_journal_commit     )
+               __field(u64,            discarded               )
+               __array(char,           err,    16              )
        ),
 
        TP_fast_assign(
-               __entry->b_bucket       = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
-               __entry->level          = b->level;
-               __entry->id             = b->btree_id;
-               __entry->b_inode        = b->key.k.p.inode;
-               __entry->b_offset       = b->key.k.p.offset;
-               __entry->inode          = k->k.p.inode;
-               __entry->offset         = k->k.p.offset;
-               __entry->size           = k->k.size;
+               __entry->dev                    = c->dev;
+               __entry->seen                   = seen;
+               __entry->open                   = open;
+               __entry->need_journal_commit    = need_journal_commit;
+               __entry->discarded              = discarded;
+               strlcpy(__entry->err, err, sizeof(__entry->err));
        ),
 
-       TP_printk("bucket %llu(%u) id %u: %u:%llu %u:%llu len %u",
-                 __entry->b_bucket, __entry->level, __entry->id,
-                 __entry->b_inode, __entry->b_offset,
-                 __entry->inode, __entry->offset, __entry->size)
+       TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->seen,
+                 __entry->open,
+                 __entry->need_journal_commit,
+                 __entry->discarded,
+                 __entry->err)
 );
 
-DECLARE_EVENT_CLASS(btree_split,
-       TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
-       TP_ARGS(c, b, keys),
+TRACE_EVENT(bucket_invalidate,
+       TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
+       TP_ARGS(c, dev, bucket, sectors),
 
        TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
+               __field(u32,            dev_idx                 )
+               __field(u32,            sectors                 )
                __field(u64,            bucket                  )
-               __field(u8,             level                   )
-               __field(u8,             id                      )
-               __field(u32,            inode                   )
-               __field(u64,            offset                  )
-               __field(u32,            keys                    )
        ),
 
        TP_fast_assign(
-               __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
-               __entry->level  = b->level;
-               __entry->id     = b->btree_id;
-               __entry->inode  = b->key.k.p.inode;
-               __entry->offset = b->key.k.p.offset;
-               __entry->keys   = keys;
+               __entry->dev            = c->dev;
+               __entry->dev_idx        = dev;
+               __entry->sectors        = sectors;
+               __entry->bucket         = bucket;
        ),
 
-       TP_printk("bucket %llu(%u) id %u: %u:%llu keys %u",
-                 __entry->bucket, __entry->level, __entry->id,
-                 __entry->inode, __entry->offset, __entry->keys)
+       TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->dev_idx, __entry->bucket,
+                 __entry->sectors)
 );
 
-DEFINE_EVENT(btree_split, btree_node_split,
-       TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
-       TP_ARGS(c, b, keys)
+/* Moving IO */
+
+DEFINE_EVENT(bkey, move_extent_read,
+       TP_PROTO(const struct bkey *k),
+       TP_ARGS(k)
 );
 
-DEFINE_EVENT(btree_split, btree_node_compact,
-       TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
-       TP_ARGS(c, b, keys)
+DEFINE_EVENT(bkey, move_extent_write,
+       TP_PROTO(const struct bkey *k),
+       TP_ARGS(k)
 );
 
-DEFINE_EVENT(btree_node, btree_set_root,
-       TP_PROTO(struct bch_fs *c, struct btree *b),
-       TP_ARGS(c, b)
+DEFINE_EVENT(bkey, move_extent_finish,
+       TP_PROTO(const struct bkey *k),
+       TP_ARGS(k)
 );
 
-/* Garbage collection */
+DEFINE_EVENT(bkey, move_extent_race,
+       TP_PROTO(const struct bkey *k),
+       TP_ARGS(k)
+);
+
+DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
+       TP_PROTO(const struct bkey *k),
+       TP_ARGS(k)
+);
 
-TRACE_EVENT(btree_gc_coalesce,
-       TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes),
-       TP_ARGS(c, b, nodes),
+TRACE_EVENT(move_data,
+       TP_PROTO(struct bch_fs *c, u64 sectors_moved,
+                u64 keys_moved),
+       TP_ARGS(c, sectors_moved, keys_moved),
 
        TP_STRUCT__entry(
-               __field(u64,            bucket                  )
-               __field(u8,             level                   )
-               __field(u8,             id                      )
-               __field(u32,            inode                   )
-               __field(u64,            offset                  )
-               __field(unsigned,       nodes                   )
+               __field(dev_t,          dev                     )
+               __field(u64,            sectors_moved   )
+               __field(u64,            keys_moved      )
        ),
 
        TP_fast_assign(
-               __entry->bucket         = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
-               __entry->level          = b->level;
-               __entry->id             = b->btree_id;
-               __entry->inode          = b->key.k.p.inode;
-               __entry->offset         = b->key.k.p.offset;
-               __entry->nodes          = nodes;
+               __entry->dev                    = c->dev;
+               __entry->sectors_moved = sectors_moved;
+               __entry->keys_moved = keys_moved;
        ),
 
-       TP_printk("bucket %llu(%u) id %u: %u:%llu nodes %u",
-                 __entry->bucket, __entry->level, __entry->id,
-                 __entry->inode, __entry->offset, __entry->nodes)
+       TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->sectors_moved, __entry->keys_moved)
 );
 
-TRACE_EVENT(btree_gc_coalesce_fail,
-       TP_PROTO(struct bch_fs *c, int reason),
-       TP_ARGS(c, reason),
+TRACE_EVENT(copygc,
+       TP_PROTO(struct bch_fs *c,
+                u64 sectors_moved, u64 sectors_not_moved,
+                u64 buckets_moved, u64 buckets_not_moved),
+       TP_ARGS(c,
+               sectors_moved, sectors_not_moved,
+               buckets_moved, buckets_not_moved),
 
        TP_STRUCT__entry(
-               __field(u8,             reason                  )
-               __array(char,           uuid,   16              )
+               __field(dev_t,          dev                     )
+               __field(u64,            sectors_moved           )
+               __field(u64,            sectors_not_moved       )
+               __field(u64,            buckets_moved           )
+               __field(u64,            buckets_not_moved       )
        ),
 
        TP_fast_assign(
-               __entry->reason         = reason;
-               memcpy(__entry->uuid, c->disk_sb->user_uuid.b, 16);
+               __entry->dev                    = c->dev;
+               __entry->sectors_moved          = sectors_moved;
+               __entry->sectors_not_moved      = sectors_not_moved;
+               __entry->buckets_moved          = buckets_moved;
+               __entry->buckets_not_moved      = buckets_not_moved;
        ),
 
-       TP_printk("%pU: %u", __entry->uuid, __entry->reason)
+       TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->sectors_moved, __entry->sectors_not_moved,
+                 __entry->buckets_moved, __entry->buckets_not_moved)
 );
 
-DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
-       TP_PROTO(struct bch_fs *c, struct btree *b),
-       TP_ARGS(c, b)
-);
+TRACE_EVENT(copygc_wait,
+       TP_PROTO(struct bch_fs *c,
+                u64 wait_amount, u64 until),
+       TP_ARGS(c, wait_amount, until),
 
-DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
-       TP_PROTO(struct bch_fs *c, struct btree *b),
-       TP_ARGS(c, b)
-);
+       TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
+               __field(u64,            wait_amount             )
+               __field(u64,            until                   )
+       ),
 
-DEFINE_EVENT(bch_fs, gc_start,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
-);
+       TP_fast_assign(
+               __entry->dev            = c->dev;
+               __entry->wait_amount    = wait_amount;
+               __entry->until          = until;
+       ),
 
-DEFINE_EVENT(bch_fs, gc_end,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+       TP_printk("%d,%d waiting for %llu sectors until %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->wait_amount, __entry->until)
 );
 
-DEFINE_EVENT(bch_fs, gc_coalesce_start,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+/* btree transactions: */
+
+DECLARE_EVENT_CLASS(transaction_event,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip),
+
+       TP_STRUCT__entry(
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+       ),
+
+       TP_fast_assign(
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+       ),
+
+       TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
 );
 
-DEFINE_EVENT(bch_fs, gc_coalesce_end,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+DEFINE_EVENT(transaction_event,        transaction_commit,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(bch_dev, sectors_saturated,
-       TP_PROTO(struct bch_dev *ca),
-       TP_ARGS(ca)
+DEFINE_EVENT(transaction_event,        trans_restart_injected,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(bch_fs, gc_sectors_saturated,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+DEFINE_EVENT(transaction_event,        trans_blocked_journal_reclaim,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+DEFINE_EVENT(transaction_event,        trans_restart_journal_res_get,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-/* Allocator */
 
-TRACE_EVENT(alloc_batch,
-       TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
-       TP_ARGS(ca, free, total),
+TRACE_EVENT(trans_restart_journal_preres_get,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                unsigned flags),
+       TP_ARGS(trans, caller_ip, flags),
 
        TP_STRUCT__entry(
-               __array(char,           uuid,   16      )
-               __field(size_t,         free            )
-               __field(size_t,         total           )
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(unsigned,               flags           )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, ca->uuid.b, 16);
-               __entry->free = free;
-               __entry->total = total;
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+               __entry->flags                  = flags;
        ),
 
-       TP_printk("%pU free %zu total %zu",
-               __entry->uuid, __entry->free, __entry->total)
+       TP_printk("%s %pS %x", __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 __entry->flags)
 );
 
-DEFINE_EVENT(bch_dev, prio_write_start,
-       TP_PROTO(struct bch_dev *ca),
-       TP_ARGS(ca)
+DEFINE_EVENT(transaction_event,        trans_restart_journal_reclaim,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(bch_dev, prio_write_end,
-       TP_PROTO(struct bch_dev *ca),
-       TP_ARGS(ca)
+DEFINE_EVENT(transaction_event,        trans_restart_fault_inject,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-TRACE_EVENT(invalidate,
-       TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors),
-       TP_ARGS(ca, bucket, sectors),
-
-       TP_STRUCT__entry(
-               __field(unsigned,       sectors                 )
-               __field(dev_t,          dev                     )
-               __field(__u64,          offset                  )
-       ),
+DEFINE_EVENT(transaction_event,        trans_traverse_all,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
+);
 
-       TP_fast_assign(
-               __entry->dev            = ca->disk_sb.bdev->bd_dev;
-               __entry->offset         = bucket << ca->bucket_bits;
-               __entry->sectors        = sectors;
-       ),
+DEFINE_EVENT(transaction_event,        trans_restart_mark_replicas,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
+);
 
-       TP_printk("invalidated %u sectors at %d,%d sector=%llu",
-                 __entry->sectors, MAJOR(__entry->dev),
-                 MINOR(__entry->dev), __entry->offset)
+DEFINE_EVENT(transaction_event,        trans_restart_key_cache_raced,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(bch_fs, rescale_prios,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+DEFINE_EVENT(transaction_event,        trans_restart_too_many_iters,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DECLARE_EVENT_CLASS(bucket_alloc,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve),
+DECLARE_EVENT_CLASS(transaction_restart_iter,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path),
 
        TP_STRUCT__entry(
-               __array(char,                   uuid,   16)
-               __field(enum alloc_reserve,     reserve   )
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(u8,                     btree_id        )
+               TRACE_BPOS_entries(pos)
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, ca->uuid.b, 16);
-               __entry->reserve = reserve;
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+               __entry->btree_id               = path->btree_id;
+               TRACE_BPOS_assign(pos, path->pos)
        ),
 
-       TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->pos_inode,
+                 __entry->pos_offset,
+                 __entry->pos_snapshot)
 );
 
-DEFINE_EVENT(bucket_alloc, bucket_alloc,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-TRACE_EVENT(freelist_empty_fail,
-       TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve,
-                struct closure *cl),
-       TP_ARGS(c, reserve, cl),
+TRACE_EVENT(trans_restart_upgrade,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path,
+                unsigned old_locks_want,
+                unsigned new_locks_want),
+       TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
 
        TP_STRUCT__entry(
-               __array(char,                   uuid,   16      )
-               __field(enum alloc_reserve,     reserve         )
-               __field(struct closure *,       cl              )
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(u8,                     btree_id        )
+               __field(u8,                     old_locks_want  )
+               __field(u8,                     new_locks_want  )
+               TRACE_BPOS_entries(pos)
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->reserve = reserve;
-               __entry->cl = cl;
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+               __entry->btree_id               = path->btree_id;
+               __entry->old_locks_want         = old_locks_want;
+               __entry->new_locks_want         = new_locks_want;
+               TRACE_BPOS_assign(pos, path->pos)
        ),
 
-       TP_printk("%pU reserve %d cl %p", __entry->uuid, __entry->reserve,
-                 __entry->cl)
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->pos_inode,
+                 __entry->pos_offset,
+                 __entry->pos_snapshot,
+                 __entry->old_locks_want,
+                 __entry->new_locks_want)
 );
 
-DECLARE_EVENT_CLASS(open_bucket_alloc,
-       TP_PROTO(struct bch_fs *c, struct closure *cl),
-       TP_ARGS(c, cl),
-
-       TP_STRUCT__entry(
-               __array(char,                   uuid,   16      )
-               __field(struct closure *,       cl              )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->cl = cl;
-       ),
-
-       TP_printk("%pU cl %p",
-                 __entry->uuid, __entry->cl)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc,
-       TP_PROTO(struct bch_fs *c, struct closure *cl),
-       TP_ARGS(c, cl)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc_fail,
-       TP_PROTO(struct bch_fs *c, struct closure *cl),
-       TP_ARGS(c, cl)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-/* Moving IO */
-
-DECLARE_EVENT_CLASS(moving_io,
-       TP_PROTO(struct bkey *k),
-       TP_ARGS(k),
-
-       TP_STRUCT__entry(
-               __field(__u32,          inode                   )
-               __field(__u64,          offset                  )
-               __field(__u32,          sectors                 )
-       ),
-
-       TP_fast_assign(
-               __entry->inode          = k->p.inode;
-               __entry->offset         = k->p.offset;
-               __entry->sectors        = k->size;
-       ),
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
+);
 
-       TP_printk("%u:%llu sectors %u",
-                 __entry->inode, __entry->offset, __entry->sectors)
+DEFINE_EVENT(transaction_event,        trans_restart_key_cache_upgrade,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(moving_io, move_read,
-       TP_PROTO(struct bkey *k),
-       TP_ARGS(k)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(moving_io, move_read_done,
-       TP_PROTO(struct bkey *k),
-       TP_ARGS(k)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(moving_io, move_write,
-       TP_PROTO(struct bkey *k),
-       TP_ARGS(k)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(moving_io, copy_collision,
-       TP_PROTO(struct bkey *k),
-       TP_ARGS(k)
+DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
 );
 
-/* Copy GC */
+DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path)
+);
 
-DEFINE_EVENT(page_alloc_fail, moving_gc_alloc_fail,
-       TP_PROTO(struct bch_fs *c, u64 size),
-       TP_ARGS(c, size)
+DEFINE_EVENT(transaction_event,        trans_restart_would_deadlock,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(bch_dev, moving_gc_start,
-       TP_PROTO(struct bch_dev *ca),
-       TP_ARGS(ca)
+DEFINE_EVENT(transaction_event,        trans_restart_would_deadlock_recursion_limit,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip),
+       TP_ARGS(trans, caller_ip)
 );
 
-TRACE_EVENT(moving_gc_end,
-       TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved,
-               u64 buckets_moved),
-       TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),
+TRACE_EVENT(trans_restart_would_deadlock_write,
+       TP_PROTO(struct btree_trans *trans),
+       TP_ARGS(trans),
 
        TP_STRUCT__entry(
-               __array(char,           uuid,   16      )
-               __field(u64,            sectors_moved   )
-               __field(u64,            keys_moved      )
-               __field(u64,            buckets_moved   )
+               __array(char,                   trans_fn, 32    )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, ca->uuid.b, 16);
-               __entry->sectors_moved = sectors_moved;
-               __entry->keys_moved = keys_moved;
-               __entry->buckets_moved = buckets_moved;
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
        ),
 
-       TP_printk("%pU sectors_moved %llu keys_moved %llu buckets_moved %llu",
-               __entry->uuid, __entry->sectors_moved, __entry->keys_moved,
-               __entry->buckets_moved)
+       TP_printk("%s", __entry->trans_fn)
 );
 
-DEFINE_EVENT(bkey, gc_copy,
-       TP_PROTO(const struct bkey *k),
-       TP_ARGS(k)
-);
+TRACE_EVENT(trans_restart_mem_realloced,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                unsigned long bytes),
+       TP_ARGS(trans, caller_ip, bytes),
 
-/* Tiering */
+       TP_STRUCT__entry(
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(unsigned long,          bytes           )
+       ),
 
-DEFINE_EVENT(page_alloc_fail, tiering_alloc_fail,
-       TP_PROTO(struct bch_fs *c, u64 size),
-       TP_ARGS(c, size)
-);
+       TP_fast_assign(
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip      = caller_ip;
+               __entry->bytes          = bytes;
+       ),
 
-DEFINE_EVENT(bch_fs, tiering_start,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+       TP_printk("%s %pS bytes %lu",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 __entry->bytes)
 );
 
-TRACE_EVENT(tiering_end,
-       TP_PROTO(struct bch_fs *c, u64 sectors_moved,
-               u64 keys_moved),
-       TP_ARGS(c, sectors_moved, keys_moved),
+TRACE_EVENT(trans_restart_key_cache_key_realloced,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path,
+                unsigned old_u64s,
+                unsigned new_u64s),
+       TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
 
        TP_STRUCT__entry(
-               __array(char,           uuid,   16      )
-               __field(u64,            sectors_moved   )
-               __field(u64,            keys_moved      )
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(enum btree_id,          btree_id        )
+               TRACE_BPOS_entries(pos)
+               __field(u32,                    old_u64s        )
+               __field(u32,                    new_u64s        )
        ),
 
        TP_fast_assign(
-               memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
-               __entry->sectors_moved = sectors_moved;
-               __entry->keys_moved = keys_moved;
-       ),
+               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
 
-       TP_printk("%pU sectors_moved %llu keys_moved %llu",
-               __entry->uuid, __entry->sectors_moved, __entry->keys_moved)
-);
+               __entry->btree_id       = path->btree_id;
+               TRACE_BPOS_assign(pos, path->pos);
+               __entry->old_u64s       = old_u64s;
+               __entry->new_u64s       = new_u64s;
+       ),
 
-DEFINE_EVENT(bkey, tiering_copy,
-       TP_PROTO(const struct bkey *k),
-       TP_ARGS(k)
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 bch2_btree_ids[__entry->btree_id],
+                 __entry->pos_inode,
+                 __entry->pos_offset,
+                 __entry->pos_snapshot,
+                 __entry->old_u64s,
+                 __entry->new_u64s)
 );
 
 #endif /* _TRACE_BCACHE_H */