tracing/function: Introduce persistent trace option
This patch introduces the 'func_pstore' option, now available in
/sys/kernel/debug/tracing/options when the function tracer is selected.
The patch also adds a small callback into pstore to record the trace;
the callback is a no-op when PSTORE=n.

Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 21f679404a
parent 060287b8c4
committed by Greg Kroah-Hartman
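
The "no-op when PSTORE=n" behaviour comes from the pstore side rather than from
this diff: the companion pstore patch presumably declares pstore_ftrace_call()
in <linux/pstore.h> with an empty inline stub when persistent function tracing
is not built in. A minimal sketch of that header fragment, assumed here for
illustration and not part of this commit:

/* Assumed <linux/pstore.h> fragment; provided by the pstore-side patch. */
#ifdef CONFIG_PSTORE_FTRACE
extern void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip);
#else
/* Compiles to nothing, so the call site added below disappears when disabled. */
static inline void pstore_ftrace_call(unsigned long ip,
                                      unsigned long parent_ip)
{
}
#endif

With a stub like this in place, the unlikely(func_flags.val &
TRACE_FUNC_OPT_PSTORE) test added in the diff below is essentially the only
overhead left on kernels built without pstore function tracing.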
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,6 +13,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -74,6 +75,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
         preempt_enable_notrace();
 }
 
+/* Our two options */
+enum {
+        TRACE_FUNC_OPT_STACK = 0x1,
+        TRACE_FUNC_OPT_PSTORE = 0x2,
+};
+
+static struct tracer_flags func_flags;
+
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
@@ -97,6 +106,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1)) {
+                /*
+                 * So far tracing doesn't support multiple buffers, so
+                 * we make an explicit call for now.
+                 */
+                if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
+                        pstore_ftrace_call(ip, parent_ip);
                 pc = preempt_count();
                 trace_function(tr, ip, parent_ip, flags, pc);
         }
@@ -158,14 +173,12 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
         .flags = FTRACE_OPS_FL_GLOBAL,
 };
 
-/* Our two options */
-enum {
-        TRACE_FUNC_OPT_STACK = 0x1,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
         { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+#ifdef CONFIG_PSTORE_FTRACE
+        { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
 #endif
         { } /* Always set a last empty entry */
 };
@@ -217,6 +230,8 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
                         register_ftrace_function(&trace_ops);
                 }
 
+                return 0;
+        } else if (bit == TRACE_FUNC_OPT_PSTORE) {
                 return 0;
         }
 
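
The last hunk shows only a fragment of func_set_flag(). As a reading aid, here
is a hedged reconstruction of how the function plausibly reads after the patch;
the pre-existing TRACE_FUNC_OPT_STACK handling is inferred from the visible
context lines, so treat it as a sketch rather than as part of the diff:

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* Do nothing if the flag already has the requested value. */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                /* Switch between the plain and the stack-tracing callbacks. */
                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        } else if (bit == TRACE_FUNC_OPT_PSTORE) {
                /*
                 * Nothing to (un)register: function_trace_call() checks
                 * func_flags.val at trace time, so accepting the bit is
                 * all that is needed here.
                 */
                return 0;
        }

        return -EINVAL;
}

The new branch simply accepts the bit: no tracer callbacks need to be swapped,
because the earlier hunk makes function_trace_call() consult func_flags.val on
every traced call, so toggling options/func_pstore takes effect immediately.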