Back in 3.16 the ftrace code was redesigned and cleaned up to remove the
double iteration list (one for registered ftrace ops, and one for
registered "global" ops), to just use one list. That simplified the code
but also broke the function tracing filtering on pid. This updates the
code to handle the filtering again with the new logic.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVsq1dAAoJEEjnJuOKh9ldfagH/39dixx3tMMElO7pGM9Q3DBE
WmJuGSOmeZA1O0balnfcV2SgegEGCSEP6sjBtyuYCfgcFDWyIdAvGrMLS/hKTOxR
pilaqyNQz7CROx1zco9gbu5pGwkSkcAfnqYzOg6IpJ0WHtiyH36GMe2wMu29u2VT
I7NgSC6ByA82N4pwvgetlUcIDcPTyrkkhGmGPBJGY+diKzeSSo8NlRbv3SNYs0ua
V072Oumu64RrZBMdn/Sb2pCF2hf6vhTXD6qS4dbpK/Rfnlblqer9SqUIx2kpg603
yDOmPY7wN9FJ94Te3EeXubLi0LqDJH4iPOndrRn1fYsgMbUpq1BlViF7W7Ajze8=
=LQ+S
-----END PGP SIGNATURE-----

Merge tag 'trace-v4.2-rc2-fix3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull ftrace fix from Steven Rostedt:
 "Back in 3.16 the ftrace code was redesigned and cleaned up to remove
  the double iteration list (one for registered ftrace ops, and one for
  registered "global" ops), to just use one list. That simplified the
  code but also broke the function tracing filtering on pid. This
  updates the code to handle the filtering again with the new logic"

* tag 'trace-v4.2-rc2-fix3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Fix breakage of set_ftrace_pid
commit 763e326c8b
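In outline: since the 3.16 redesign every callback lives on the single
ftrace_ops list, so pid filtering can no longer be done by swapping one
global ftrace_pid_function. The fix instead gives each ftrace_ops a
saved_func slot and an opt-in FTRACE_OPS_FL_PID flag: while pid filtering
is active, a flagged ops has its ->func redirected to ftrace_pid_func(),
which checks whether the current task is being traced and only then chains
to saved_func. Below is a minimal user-space sketch of that mechanism with
simplified types; pid_func, update_pid_func, traced_pid and so on are
stand-ins for illustration, not the kernel's code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the patched scheme: each ops keeps the callback
 * the user registered in saved_func; pid filtering redirects func to a
 * wrapper that filters on the current pid before chaining on. */
typedef void (*trace_func_t)(unsigned long ip, void *data);

enum { OPS_FL_PID = 1 << 0 };

struct ops {
        trace_func_t func;       /* what the trampoline actually calls */
        trace_func_t saved_func; /* the originally registered callback */
        unsigned long flags;
        struct ops *next;
};

static int traced_pid = -1; /* stand-in for the kernel's ftrace_pids list */
static int current_pid;     /* stand-in for current->pid */

static bool pids_enabled(void)
{
        return traced_pid != -1;
}

/* Counterpart of ftrace_pid_func(): drop events from other tasks. */
static void pid_func(unsigned long ip, void *data)
{
        struct ops *op = data;

        if (current_pid != traced_pid)
                return;
        op->saved_func(ip, op);
}

/* Counterpart of ftrace_update_pid_func(): when the pid list changes,
 * flip every opted-in ops between the wrapper and its real callback. */
static void update_pid_func(struct ops *list)
{
        bool enabled = pids_enabled();
        struct ops *op;

        for (op = list; op; op = op->next)
                if (op->flags & OPS_FL_PID)
                        op->func = enabled ? pid_func : op->saved_func;
}

static void my_callback(unsigned long ip, void *data)
{
        printf("traced ip %#lx\n", ip);
}

int main(void)
{
        struct ops my_ops = { my_callback, my_callback, OPS_FL_PID, NULL };

        traced_pid = 42;
        update_pid_func(&my_ops);

        current_pid = 7;              /* filtered out: prints nothing */
        my_ops.func(0x1000, &my_ops);

        current_pid = 42;             /* matches: chains to my_callback */
        my_ops.func(0x2000, &my_ops);
        return 0;
}

The point of saved_func is that the redirect is reversible per-ops:
toggling pid filtering on and off never loses the originally registered
callback. The actual diff follows.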
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            SAVE_REGS. If another ops with this flag set is already registered
  *            for any of the functions that this ops will be registered for, then
  *            this ops will fail to register or set_filter_ip.
+ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
@@ -132,6 +133,7 @@ enum {
 	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
 	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
 	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
+	FTRACE_OPS_FL_PID			= 1 << 14,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
 	struct ftrace_ops		*next;
 	unsigned long			flags;
 	void				*private;
+	ftrace_func_t			saved_func;
 	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	int				nr_trampolines;
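The header side of the change is small: one new flag bit and a saved_func
slot in struct ftrace_ops. An in-kernel caller opts in to pid filtering
simply by OR-ing the flag into its ops. A hedged sketch of such a caller
(my_tracer_func and my_ops are illustrative names, not existing kernel
symbols); the callback signature matches the ftrace_func_t of this kernel
era:

/* Illustrative in-kernel user opting in to set_ftrace_pid filtering. */
static void my_tracer_func(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called for every traced function of the allowed pids */
}

static struct ftrace_ops my_ops = {
	.func	= my_tracer_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID,
};

Such an ops would then be activated with register_ftrace_function(&my_ops);
ops without FTRACE_OPS_FL_PID keep tracing all tasks regardless of the pid
list. The kernel/trace/ftrace.c side of the diff follows.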
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
 	struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+	return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-	/* do not set ftrace_pid_function to itself! */
-	if (func != ftrace_pid_func)
-		ftrace_pid_function = func;
+	op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
-	ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
+	/* Always save the function, and reset at unregistering */
+	ops->saved_func = ops->func;
+
+	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+		ops->func = ftrace_pid_func;
+
 	ftrace_update_trampoline(ops);
 
 	if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
+	ops->func = ops->saved_func;
+
 	return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+	bool enabled = ftrace_pids_enabled();
+	struct ftrace_ops *op;
+
 	/* Only do something if we are tracing something */
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->flags & FTRACE_OPS_FL_PID) {
+			op->func = enabled ? ftrace_pid_func :
+				op->saved_func;
+			ftrace_update_trampoline(op);
+		}
+	} while_for_each_ftrace_op(op);
+
 	update_ftrace_function();
 }
 
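The two hunks above carry the core invariant of the fix:
__register_ftrace_function() always snapshots ->func into ->saved_func
(and redirects to ftrace_pid_func() immediately if pid filtering is
already on), while __unregister_ftrace_function() restores the saved
callback so a later re-register starts clean. Continuing the simplified
user-space model from the first sketch (register_ops and unregister_ops
are illustrative names, not kernel API):

/* Continuing the model above: counterparts of __register_ftrace_function()
 * and __unregister_ftrace_function() as changed by the hunks just shown. */
static struct ops *ops_list;

static void register_ops(struct ops *op)
{
        op->next = ops_list;
        ops_list = op;

        /* Always save the function, and reset at unregistering. */
        op->saved_func = op->func;

        /* If pid filtering is already active, redirect right away. */
        if ((op->flags & OPS_FL_PID) && pids_enabled())
                op->func = pid_func;
}

static void unregister_ops(struct ops *op)
{
        struct ops **p;

        /* Unlink from the single ops list. */
        for (p = &ops_list; *p; p = &(*p)->next) {
                if (*p == op) {
                        *p = op->next;
                        break;
                }
        }

        /* Restore the registered callback so re-registering starts clean. */
        op->func = op->saved_func;
}

The remaining kernel/trace/ftrace.c hunks continue below.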
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
 	.local_hash.filter_hash		= EMPTY_HASH,
 	INIT_OPS_HASH(global_ops)
 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
-					  FTRACE_OPS_FL_INITIALIZED,
+					  FTRACE_OPS_FL_INITIALIZED |
+					  FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
+				  FTRACE_OPS_FL_INITIALIZED |
+				  FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
 		if (WARN_ON(tr->ops->func != ftrace_stub))
 			printk("ftrace ops had %pS for function\n",
 			       tr->ops->func);
-		/* Only the top level instance does pid tracing */
-		if (!list_empty(&ftrace_pids)) {
-			set_ftrace_pid_function(func);
-			func = ftrace_pid_func;
-		}
 	}
 	tr->ops->func = func;
 	tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
 	mutex_lock(&ftrace_lock);
 
-	if (list_empty(&ftrace_pids) && (!*pos))
+	if (!ftrace_pids_enabled() && (!*pos))
 		return (void *) 1;
 
 	return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
 				   FTRACE_OPS_FL_INITIALIZED |
+				   FTRACE_OPS_FL_PID |
 				   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
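What this fix restores for users: writing a pid into set_ftrace_pid once
again limits the function tracer to that task. A minimal sketch of driving
that interface from user space; the tracefs mount point varies by system
(/sys/kernel/debug/tracing or /sys/kernel/tracing), pid 1234 is a
placeholder, and root is required:

#include <stdio.h>

/* Write a single string to a tracing control file; returns 0 on success. */
static int write_file(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        /* Adjust if tracefs is mounted at /sys/kernel/tracing instead. */
        const char *dir = "/sys/kernel/debug/tracing";
        char path[256];

        /* Trace only pid 1234 (placeholder)... */
        snprintf(path, sizeof(path), "%s/set_ftrace_pid", dir);
        if (write_file(path, "1234"))
                return 1;

        /* ...with the function tracer. */
        snprintf(path, sizeof(path), "%s/current_tracer", dir);
        return write_file(path, "function") ? 1 : 0;
}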