bcachefs: Tracepoint improvements

This improves the transaction restart tracepoints — adding distinct
tracepoints for all the locations and reasons a transaction might have
been restarted, and ensuring that there is a tracepoint for every
transaction restart.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
This commit is contained in:
Kent Overstreet, 2022-01-08 22:59:58 -05:00; committed by Kent Overstreet
parent 7f6ff935f7
commit bc82d08bae
4 changed files with 127 additions and 15 deletions

View File

@ -665,6 +665,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
* been freed:
*/
if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
trace_trans_restart_relock_parent_for_fill(trans->fn,
_THIS_IP_, btree_id, &path->pos);
btree_trans_restart(trans);
return ERR_PTR(-EINTR);
}
@ -712,6 +714,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) {
trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_,
btree_id, &path->pos);
btree_trans_restart(trans);
return ERR_PTR(-EINTR);
}

View File

@ -166,19 +166,25 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
int want = __btree_lock_want(path, level);
if (!is_btree_node(path, level))
return false;
goto fail;
if (race_fault())
return false;
goto fail;
if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
(btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, want))) {
mark_btree_node_locked(path, level, want);
return true;
} else {
return false;
}
fail:
trace_btree_node_relock_fail(trans->fn, _RET_IP_,
path->btree_id,
&path->pos,
(unsigned long) b,
path->l[level].lock_seq,
is_btree_node(path, level) ? b->c.lock.state.seq : 0);
return false;
}
bool bch2_btree_node_upgrade(struct btree_trans *trans,
@ -225,7 +231,7 @@ success:
static inline bool btree_path_get_locks(struct btree_trans *trans,
struct btree_path *path,
bool upgrade, unsigned long trace_ip)
bool upgrade)
{
unsigned l = path->level;
int fail_idx = -1;
@ -427,6 +433,8 @@ bool bch2_btree_path_relock_intent(struct btree_trans *trans,
if (!bch2_btree_node_relock(trans, path, l)) {
__bch2_btree_path_unlock(path);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
path->btree_id, &path->pos);
btree_trans_restart(trans);
return false;
}
@ -439,10 +447,13 @@ __flatten
static bool bch2_btree_path_relock(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
bool ret = btree_path_get_locks(trans, path, false, trace_ip);
bool ret = btree_path_get_locks(trans, path, false);
if (!ret)
if (!ret) {
trace_trans_restart_relock_path(trans->fn, trace_ip,
path->btree_id, &path->pos);
btree_trans_restart(trans);
}
return ret;
}
@ -456,7 +467,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
path->locks_want = new_locks_want;
if (btree_path_get_locks(trans, path, true, _THIS_IP_))
if (btree_path_get_locks(trans, path, true))
return true;
/*
@ -484,7 +495,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
linked->btree_id == path->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
btree_path_get_locks(trans, linked, true, _THIS_IP_);
btree_path_get_locks(trans, linked, true);
}
return false;
@ -1955,7 +1966,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
locks_want = min(locks_want, BTREE_MAX_DEPTH);
if (locks_want > path->locks_want) {
path->locks_want = locks_want;
btree_path_get_locks(trans, path, true, _THIS_IP_);
btree_path_get_locks(trans, path, true);
}
return path;
@ -2090,6 +2101,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
__bch2_btree_path_unlock(path);
path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
path->btree_id, &path->pos);
btree_trans_restart(trans);
ret = -EINTR;
goto err;

View File

@ -223,7 +223,8 @@ static int btree_key_cache_fill(struct btree_trans *trans,
goto err;
if (!bch2_btree_node_relock(trans, ck_path, 0)) {
trace_transaction_restart_ip(trans->fn, _THIS_IP_);
trace_trans_restart_relock_key_cache_fill(trans->fn,
_THIS_IP_, ck_path->btree_id, &ck_path->pos);
ret = btree_trans_restart(trans);
goto err;
}

View File

@ -346,6 +346,52 @@ TRACE_EVENT(btree_cache_scan,
__entry->ret)
);
/*
 * btree_node_relock_fail - emitted when relocking a btree node fails.
 *
 * Records the transaction entry point (trans_fn), the call site rendered as a
 * symbol string (caller), the btree id and position of the path, the node
 * pointer, and both lock sequence numbers so a seq mismatch can be diagnosed
 * from the trace alone.
 */
TRACE_EVENT(btree_node_relock_fail,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos,
unsigned long node,
u32 iter_lock_seq,
u32 node_lock_seq),
TP_ARGS(trans_fn, caller_ip, btree_id, pos, node, iter_lock_seq, node_lock_seq),
TP_STRUCT__entry(
__array(char, trans_fn, 24 )
__array(char, caller, 32 )
__field(u8, btree_id )
__field(u64, pos_inode )
__field(u64, pos_offset )
__field(u32, pos_snapshot )
__field(unsigned long, node )
__field(u32, iter_lock_seq )
__field(u32, node_lock_seq )
),
TP_fast_assign(
/* NOTE(review): strncpy() leaves trans_fn unterminated when the source
 * is >= 24 bytes — consider strscpy() here. */
strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
/* Resolve the caller ip to a symbol at trace time ("%pS"). */
snprintf(__entry->caller, sizeof(__entry->caller), "%pS", (void *) caller_ip);
__entry->btree_id = btree_id;
__entry->pos_inode = pos->inode;
__entry->pos_offset = pos->offset;
__entry->pos_snapshot = pos->snapshot;
__entry->node = node;
/* iter seq vs node seq: a mismatch is why the relock failed. */
__entry->iter_lock_seq = iter_lock_seq;
__entry->node_lock_seq = node_lock_seq;
),
TP_printk("%s %s btree %u pos %llu:%llu:%u, node %lu iter seq %u lock seq %u",
__entry->trans_fn,
__entry->caller,
__entry->btree_id,
__entry->pos_inode,
__entry->pos_offset,
__entry->pos_snapshot,
__entry->node,
__entry->iter_lock_seq,
__entry->node_lock_seq)
);
/* Garbage collection */
DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
@ -621,7 +667,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
TP_STRUCT__entry(
__array(char, trans_fn, 24 )
__field(unsigned long, caller_ip )
__array(char, caller, 32 )
__field(u8, btree_id )
__field(u64, pos_inode )
__field(u64, pos_offset )
@ -630,16 +676,16 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
TP_fast_assign(
strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
snprintf(__entry->caller, sizeof(__entry->caller), "%pS", (void *) caller_ip);
__entry->btree_id = btree_id;
__entry->pos_inode = pos->inode;
__entry->pos_offset = pos->offset;
__entry->pos_snapshot = pos->snapshot;
),
TP_printk("%s %pS btree %u pos %llu:%llu:%u",
TP_printk("%s %s btree %u pos %llu:%llu:%u",
__entry->trans_fn,
(void *) __entry->caller_ip,
__entry->caller,
__entry->btree_id,
__entry->pos_inode,
__entry->pos_offset,
@ -694,6 +740,54 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
/*
 * Per-call-site transaction restart events, all sharing the
 * transaction_restart_iter event class (trans_fn, caller ip, btree id, pos).
 */

/* Fired from bch2_btree_iter_next_node() when relocking fails while stepping
 * to the next node. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos),
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
/* Fired from bch2_btree_node_fill() when the parent node cannot be relocked
 * before filling. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos),
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
/* Fired from bch2_btree_node_fill() when six_relock_type() fails after the
 * fill completes. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos),
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
/* Fired from btree_key_cache_fill() when the key cache path cannot be
 * relocked (replaces the generic transaction_restart_ip event there). */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos),
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
/* Fired from bch2_btree_path_relock() when btree_path_get_locks() fails. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos),
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
/* Fired from bch2_btree_path_relock_intent() when an intent relock fails and
 * the path is forced back to BTREE_ITER_NEED_TRAVERSE. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos),
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,