]> git.sesse.net Git - bcachefs-tools-debian/blobdiff - libbcachefs/trace.h
New upstream release
[bcachefs-tools-debian] / libbcachefs / trace.h
index a743ab47796654a471d4cb1e33d0c6302525b0a5..09a530325dd05e43d18f7e81a3d64a3fbd95c6ea 100644 (file)
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(btree_node,
        TP_printk("%d,%d %u %s %llu:%llu:%u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->level,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
 );
 
@@ -137,6 +137,25 @@ DEFINE_EVENT(bio, read_promote,
        TP_ARGS(bio)
 );
 
+TRACE_EVENT(read_nopromote,
+       TP_PROTO(struct bch_fs *c, int ret),
+       TP_ARGS(c, ret),
+
+       TP_STRUCT__entry(
+               __field(dev_t,          dev             )
+               __array(char,           ret, 32         )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = c->dev;
+               strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
+       ),
+
+       TP_printk("%d,%d ret %s",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->ret)
+);
+
 DEFINE_EVENT(bio, read_bounce,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
@@ -177,10 +196,9 @@ DEFINE_EVENT(bio, journal_write,
 TRACE_EVENT(journal_reclaim_start,
        TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
                 u64 min_nr, u64 min_key_cache,
-                u64 prereserved, u64 prereserved_total,
                 u64 btree_cache_dirty, u64 btree_cache_total,
                 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
-       TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
+       TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
                btree_cache_dirty, btree_cache_total,
                btree_key_cache_dirty, btree_key_cache_total),
 
@@ -190,8 +208,6 @@ TRACE_EVENT(journal_reclaim_start,
                __field(bool,           kicked                  )
                __field(u64,            min_nr                  )
                __field(u64,            min_key_cache           )
-               __field(u64,            prereserved             )
-               __field(u64,            prereserved_total       )
                __field(u64,            btree_cache_dirty       )
                __field(u64,            btree_cache_total       )
                __field(u64,            btree_key_cache_dirty   )
@@ -204,22 +220,18 @@ TRACE_EVENT(journal_reclaim_start,
                __entry->kicked                 = kicked;
                __entry->min_nr                 = min_nr;
                __entry->min_key_cache          = min_key_cache;
-               __entry->prereserved            = prereserved;
-               __entry->prereserved_total      = prereserved_total;
                __entry->btree_cache_dirty      = btree_cache_dirty;
                __entry->btree_cache_total      = btree_cache_total;
                __entry->btree_key_cache_dirty  = btree_key_cache_dirty;
                __entry->btree_key_cache_total  = btree_key_cache_total;
        ),
 
-       TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
+       TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->direct,
                  __entry->kicked,
                  __entry->min_nr,
                  __entry->min_key_cache,
-                 __entry->prereserved,
-                 __entry->prereserved_total,
                  __entry->btree_cache_dirty,
                  __entry->btree_cache_total,
                  __entry->btree_key_cache_dirty,
@@ -403,37 +415,55 @@ TRACE_EVENT(btree_path_relock_fail,
                __field(u8,                     level           )
                TRACE_BPOS_entries(pos)
                __array(char,                   node, 24        )
+               __field(u8,                     self_read_count )
+               __field(u8,                     self_intent_count)
+               __field(u8,                     read_count      )
+               __field(u8,                     intent_count    )
                __field(u32,                    iter_lock_seq   )
                __field(u32,                    node_lock_seq   )
        ),
 
        TP_fast_assign(
                struct btree *b = btree_path_node(path, level);
+               struct six_lock_count c;
 
                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->level                  = path->level;
                TRACE_BPOS_assign(pos, path->pos);
-               if (IS_ERR(b))
+
+               c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
+               __entry->self_read_count        = c.n[SIX_LOCK_read];
+               __entry->self_intent_count      = c.n[SIX_LOCK_intent];
+
+               if (IS_ERR(b)) {
                        strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
-               else
+               } else {
+                       c = six_lock_counts(&path->l[level].b->c.lock);
+                       __entry->read_count     = c.n[SIX_LOCK_read];
+                       __entry->intent_count   = c.n[SIX_LOCK_intent];
                        scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+               }
                __entry->iter_lock_seq          = path->l[level].lock_seq;
                __entry->node_lock_seq          = is_btree_node(path, level)
                        ? six_lock_seq(&path->l[level].b->c.lock)
                        : 0;
        ),
 
-       TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->level,
                  __entry->node,
+                 __entry->self_read_count,
+                 __entry->self_intent_count,
+                 __entry->read_count,
+                 __entry->intent_count,
                  __entry->iter_lock_seq,
                  __entry->node_lock_seq)
 );
@@ -475,7 +505,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
                __entry->self_intent_count      = c.n[SIX_LOCK_intent];
                c = six_lock_counts(&path->l[level].b->c.lock);
                __entry->read_count             = c.n[SIX_LOCK_read];
-               __entry->intent_count           = c.n[SIX_LOCK_read];
+               __entry->intent_count           = c.n[SIX_LOCK_intent];
                __entry->iter_lock_seq          = path->l[level].lock_seq;
                __entry->node_lock_seq          = is_btree_node(path, level)
                        ? six_lock_seq(&path->l[level].b->c.lock)
@@ -485,7 +515,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
        TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
@@ -730,25 +760,36 @@ DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
 );
 
 TRACE_EVENT(move_data,
-       TP_PROTO(struct bch_fs *c, u64 sectors_moved,
-                u64 keys_moved),
-       TP_ARGS(c, sectors_moved, keys_moved),
+       TP_PROTO(struct bch_fs *c,
+                struct bch_move_stats *stats),
+       TP_ARGS(c, stats),
 
        TP_STRUCT__entry(
-               __field(dev_t,          dev                     )
-               __field(u64,            sectors_moved   )
+               __field(dev_t,          dev             )
                __field(u64,            keys_moved      )
+               __field(u64,            keys_raced      )
+               __field(u64,            sectors_seen    )
+               __field(u64,            sectors_moved   )
+               __field(u64,            sectors_raced   )
        ),
 
        TP_fast_assign(
-               __entry->dev                    = c->dev;
-               __entry->sectors_moved = sectors_moved;
-               __entry->keys_moved = keys_moved;
+               __entry->dev            = c->dev;
+               __entry->keys_moved     = atomic64_read(&stats->keys_moved);
+               __entry->keys_raced     = atomic64_read(&stats->keys_raced);
+               __entry->sectors_seen   = atomic64_read(&stats->sectors_seen);
+               __entry->sectors_moved  = atomic64_read(&stats->sectors_moved);
+               __entry->sectors_raced  = atomic64_read(&stats->sectors_raced);
        ),
 
-       TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
+       TP_printk("%d,%d keys moved %llu raced %llu "
+                 "sectors seen %llu moved %llu raced %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->sectors_moved, __entry->keys_moved)
+                 __entry->keys_moved,
+                 __entry->keys_raced,
+                 __entry->sectors_seen,
+                 __entry->sectors_moved,
+                 __entry->sectors_raced)
 );
 
 TRACE_EVENT(evacuate_bucket,
@@ -975,7 +1016,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
        TP_printk("%s %pS btree %s pos %llu:%llu:%u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot)
@@ -995,13 +1036,16 @@ DEFINE_EVENT(transaction_restart_iter,   trans_restart_btree_node_split,
        TP_ARGS(trans, caller_ip, path)
 );
 
+struct get_locks_fail;
+
 TRACE_EVENT(trans_restart_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned old_locks_want,
-                unsigned new_locks_want),
-       TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
+                unsigned new_locks_want,
+                struct get_locks_fail *f),
+       TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),
 
        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
@@ -1009,6 +1053,11 @@ TRACE_EVENT(trans_restart_upgrade,
                __field(u8,                     btree_id        )
                __field(u8,                     old_locks_want  )
                __field(u8,                     new_locks_want  )
+               __field(u8,                     level           )
+               __field(u32,                    path_seq        )
+               __field(u32,                    node_seq        )
+               __field(u32,                    path_alloc_seq  )
+               __field(u32,                    downgrade_seq   )
                TRACE_BPOS_entries(pos)
        ),
 
@@ -1018,18 +1067,28 @@ TRACE_EVENT(trans_restart_upgrade,
                __entry->btree_id               = path->btree_id;
                __entry->old_locks_want         = old_locks_want;
                __entry->new_locks_want         = new_locks_want;
+               __entry->level                  = f->l;
+               __entry->path_seq               = path->l[f->l].lock_seq;
+               __entry->node_seq               = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
+               __entry->path_alloc_seq         = path->alloc_seq;
+               __entry->downgrade_seq          = path->downgrade_seq;
                TRACE_BPOS_assign(pos, path->pos)
        ),
 
-       TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->old_locks_want,
-                 __entry->new_locks_want)
+                 __entry->new_locks_want,
+                 __entry->level,
+                 __entry->path_seq,
+                 __entry->node_seq,
+                 __entry->path_alloc_seq,
+                 __entry->downgrade_seq)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
@@ -1182,7 +1241,7 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
        TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
@@ -1190,6 +1249,42 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
                  __entry->new_u64s)
 );
 
+TRACE_EVENT(path_downgrade,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path,
+                unsigned old_locks_want),
+       TP_ARGS(trans, caller_ip, path, old_locks_want),
+
+       TP_STRUCT__entry(
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+               __field(unsigned,               old_locks_want  )
+               __field(unsigned,               new_locks_want  )
+               __field(unsigned,               btree           )
+               TRACE_BPOS_entries(pos)
+       ),
+
+       TP_fast_assign(
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+               __entry->old_locks_want         = old_locks_want;
+               __entry->new_locks_want         = path->locks_want;
+               __entry->btree                  = path->btree_id;
+               TRACE_BPOS_assign(pos, path->pos);
+       ),
+
+       TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip,
+                 __entry->old_locks_want,
+                 __entry->new_locks_want,
+                 bch2_btree_id_str(__entry->btree),
+                 __entry->pos_inode,
+                 __entry->pos_offset,
+                 __entry->pos_snapshot)
+);
+
 DEFINE_EVENT(transaction_event,        trans_restart_write_buffer_flush,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),