commit 43dd61c9a0
The new code that allows different utilities to pick and choose what
functions they trace broke the :mod: hook that allows users to trace
only functions of a particular module.

The reason is that the :mod: hook bypasses the hash that is set up to
allow individual users to trace their own functions, and uses the
global hash directly. But if the global hash has not been set up, it
will cause a bug:

  echo '*:mod:radeon' > /sys/kernel/debug/set_ftrace_filter

produces:

 [drm:drm_mode_getfb] *ERROR* invalid framebuffer id
 [drm:radeon_crtc_page_flip] *ERROR* failed to reserve new rbo buffer before flip
 BUG: unable to handle kernel paging request at ffffffff8160ec90
 IP: [<ffffffff810d9136>] add_hash_entry+0x66/0xd0
 PGD 1a05067 PUD 1a09063 PMD 80000000016001e1
 Oops: 0003 [#1] SMP
 CPU 1
 Modules linked in: cryptd aes_x86_64 aes_generic binfmt_misc rfcomm
  bnep ip6table_filter hid radeon r8169 ahci libahci mii ttm
  drm_kms_helper drm video i2c_algo_bit intel_agp intel_gtt
 Pid: 10344, comm: bash Tainted: G        WC  3.0.0-rc5 #1 Dell Inc. Inspiron N5010/0YXXJJ
 RIP: 0010:[<ffffffff810d9136>]  [<ffffffff810d9136>] add_hash_entry+0x66/0xd0
 RSP: 0018:ffff88003a96bda8  EFLAGS: 00010246
 RAX: ffff8801301735c0 RBX: ffffffff8160ec80 RCX: 0000000000306ee0
 RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff880137c92940
 RBP: ffff88003a96bdb8 R08: ffff880137c95680 R09: 0000000000000000
 R10: 0000000000000001 R11: 0000000000000000 R12: ffffffff81c9df78
 R13: ffff8801153d1000 R14: 0000000000000000 R15: 0000000000000000
 FS:  00007f329c18a700(0000) GS:ffff880137c80000(0000) knlGS:0000000000000000
 CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
 CR2: ffffffff8160ec90 CR3: 000000003002b000 CR4: 00000000000006e0
 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
 Process bash (pid: 10344, threadinfo ffff88003a96a000, task ffff88012fcfc470)
 Stack:
  0000000000000fd0 00000000000000fc ffff88003a96be38 ffffffff810d92f5
  ffff88011c4c4e00 ffff880000000000 000000000b69f4d0 ffffffff8160ec80
  ffff8800300e6f06 0000000081130295 0000000000000282 ffff8800300e6f00
 Call Trace:
  [<ffffffff810d92f5>] match_records+0x155/0x1b0
  [<ffffffff810d940c>] ftrace_mod_callback+0xbc/0x100
  [<ffffffff810dafdf>] ftrace_regex_write+0x16f/0x210
  [<ffffffff810db09f>] ftrace_filter_write+0xf/0x20
  [<ffffffff81166e48>] vfs_write+0xc8/0x190
  [<ffffffff81167001>] sys_write+0x51/0x90
  [<ffffffff815c7e02>] system_call_fastpath+0x16/0x1b
 Code: 48 8b 33 31 d2 48 85 f6 75 33 49 89 d4 4c 03 63 08 49 8b 14 24 48 85 d2 48 89 10 74 04 48 89 42 08 49 89 04 24 4c 89 60 08 31 d2
 RIP  [<ffffffff810d9136>] add_hash_entry+0x66/0xd0
  RSP <ffff88003a96bda8>
 CR2: ffffffff8160ec90
 ---[ end trace a5d031828efdd88e ]---

Reported-by: Brian Marete <marete@toshnix.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
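
For context, :mod: is one of several commands accepted by set_ftrace_filter. A minimal sketch of its intended use (assuming debugfs is mounted at /sys/kernel/debug with the tracing files under the tracing/ subdirectory; the radeon module is just the example from the report):

    # restrict the function tracer to one module's functions
    cd /sys/kernel/debug/tracing
    echo '*:mod:radeon' > set_ftrace_filter
    echo function > current_tracer
    head trace

    # This must work even while the global filter hash is still empty
    # (no previous filter set), which is exactly the case that
    # triggered the oops above.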
407 lines
8.2 KiB
C
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}
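
/*
 * Callback used when the TRACE_ITER_PREEMPTONLY trace flag is set:
 * it disables preemption rather than interrupts while recording.
 */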
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}
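
/*
 * Default function callback.  Interrupts are disabled around the
 * record, and the per-cpu "disabled" counter makes a nested entry
 * (such as one from NMI context) bail out instead of recursing
 * into the ring buffer.
 */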
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
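
/*
 * Like function_trace_call(), but also records a stack trace with
 * each function entry (the func_stack_trace option).
 */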
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
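
/*
 * Both ops carry FTRACE_OPS_FL_GLOBAL, so they share the global
 * filter hash that set_ftrace_filter manipulates; that is the hash
 * involved in the :mod: regression fixed by this commit.
 */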
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};

/* Our two options */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
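
/*
 * Select the callback that matches the current trace flags and
 * register it.  ftrace_function_enabled is cleared first so the
 * callbacks stay quiet while the switch is made.
 */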
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                unregister_ftrace_function(&trace_stack_ops);
        else
                unregister_ftrace_function(&trace_ops);
}
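
/*
 * Toggle the func_stack_trace option: swap between the plain ops
 * and the ops whose callback also records a stack trace.
 */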
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        }

        return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
        .name = "function",
        .init = function_trace_init,
        .reset = function_trace_reset,
        .start = function_trace_start,
        .wait_pipe = poll_wait_pipe,
        .flags = &func_flags,
        .set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function,
#endif
};
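
/*
 * With dynamic ftrace, set_ftrace_filter also accepts probe commands
 * of the form "<glob>:traceon[:count]" and "<glob>:traceoff[:count]";
 * the probes below flip the global tracing switch when a matched
 * function is hit.
 */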
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (!tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
        .func = ftrace_traceon,
        .print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func = ftrace_traceoff,
        .print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:", (void *)ip);

        if (ops == &traceon_probe_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        unregister_ftrace_function_probe_func(glob, ops);

        return 0;
}
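
/*
 * Parse a traceon/traceoff command written to set_ftrace_filter.
 * A leading '!' unregisters a previously installed probe; an
 * optional ":count" limits how many times the probe fires.
 */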
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return ftrace_trace_onoff_unreg(glob+1, cmd, param);

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = strict_strtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
device_initcall(init_function_trace);