tracing: Remove unused ftrace_cpu_disabled per cpu variable
Since the ring buffer is lockless, there is no need to disable ftrace per CPU, and nothing does so anymore: after commit 68179686ac ("tracing: Remove ftrace_disable/enable_cpu()") ftrace_cpu_disabled stays the same after initialization; nothing ever changes it.

ftrace_cpu_disabled shouldn't be used by any external module anyway, since it disables only the function and function_graph tracers, not any other tracer.
Link: http://lkml.kernel.org/r/1446836846-22239-1-git-send-email-0x7f454c46@gmail.com
Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
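For context, the pattern being deleted is a per-CPU "tracing disabled" flag that the function and function_graph trace paths read on every event. Below is a minimal, hypothetical userspace sketch of that pattern; the names and the plain array stand in for the kernel's DEFINE_PER_CPU()/__this_cpu_read() machinery and are not the ftrace API itself.

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for DEFINE_PER_CPU(int, ftrace_cpu_disabled): one flag per CPU,
 * set (if ever) at initialization and only read afterwards. */
static int cpu_disabled[NR_CPUS];

/* Stand-in for the fast-path guard removed by this commit: bail out of
 * tracing when the current CPU's flag is set. */
static void trace_event(int cpu, const char *name)
{
	if (cpu_disabled[cpu])          /* kernel: __this_cpu_read(...) */
		return;

	printf("cpu%d: traced %s\n", cpu, name);
}

int main(void)
{
	cpu_disabled[1] = 1;            /* nothing in ftrace sets this anymore */

	trace_event(0, "do_sys_open");  /* traced */
	trace_event(1, "do_sys_open");  /* silently dropped */
	return 0;
}

Because the lockless ring buffer already handles concurrent writers safely, the flag never needs to be set, which is why the guard (and the variable behind it) can simply be removed.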
commit 03e88ae6b3
parent 8b1291994d
kernel/trace/trace.c
@@ -100,8 +100,6 @@ static DEFINE_PER_CPU(bool, trace_cmdline_save);
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(int, ftrace_cpu_disabled);
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
@@ -1775,10 +1773,6 @@ trace_function(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
-	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
kernel/trace/trace.h
@@ -667,7 +667,6 @@ extern int DYN_FTRACE_TEST_NAME2(void);
 
 extern bool ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
kernel/trace/trace_functions_graph.c
@@ -288,9 +288,6 @@ int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return 0;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -403,9 +400,6 @@ void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 					  sizeof(*entry), flags, pc);
 	if (!event)