perf: generalize perf_callchain
. avoid walking the stack when there is no room left in the buffer
. generalize get_perf_callchain() to be called from bpf helper

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6b83d28a55
commit 568b329a02
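As a reading aid before the diff, here is a minimal sketch of how a caller (such as the bpf helper the commit message anticipates) might invoke the generalized entry point. Only the get_perf_callchain() signature comes from this commit; the wrapper function, its name, and its error handling are hypothetical:

/* Hypothetical caller of the new entry point: collect a kernel-only
 * callchain with no PERF_CONTEXT_* marker words (add_mark == false)
 * and no pre-filled leading slots (init_nr == 0). Assumes the
 * callchain buffers were set up earlier via get_callchain_buffers(). */
static int collect_kernel_stack(struct pt_regs *regs)
{
	struct perf_callchain_entry *trace;

	trace = get_perf_callchain(regs, 0 /* init_nr */,
				   true /* kernel */, false /* user */,
				   false /* crosstask */, false /* add_mark */);
	if (!trace)
		return -EFAULT;	/* no buffer available, or recursion detected */

	/* trace->ip[0 .. trace->nr - 1] now holds kernel return addresses */
	return 0;
}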
@@ -37,7 +37,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 /* Generic stack tracer with callbacks */
 
 struct stacktrace_ops {
-	void (*address)(void *data, unsigned long address, int reliable);
+	int (*address)(void *data, unsigned long address, int reliable);
 	/* On negative return stop dumping */
 	int (*stack)(void *data, char *name);
 	walk_stack_t	walk_stack;
@@ -2180,11 +2180,11 @@ static int backtrace_stack(void *data, char *name)
 	return 0;
 }
 
-static void backtrace_address(void *data, unsigned long addr, int reliable)
+static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
 	struct perf_callchain_entry *entry = data;
 
-	perf_callchain_store(entry, addr);
+	return perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops backtrace_ops = {
@@ -135,7 +135,8 @@ print_context_stack_bp(struct thread_info *tinfo,
 		if (!__kernel_text_address(addr))
 			break;
 
-		ops->address(data, addr, 1);
+		if (ops->address(data, addr, 1))
+			break;
 		frame = frame->next_frame;
 		ret_addr = &frame->return_address;
 		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
@@ -154,10 +155,11 @@ static int print_trace_stack(void *data, char *name)
 /*
  * Print one address/symbol entries per line.
  */
-static void print_trace_address(void *data, unsigned long addr, int reliable)
+static int print_trace_address(void *data, unsigned long addr, int reliable)
 {
 	touch_nmi_watchdog();
 	printk_stack_address(addr, reliable, data);
+	return 0;
 }
 
 static const struct stacktrace_ops print_trace_ops = {
@@ -14,30 +14,34 @@ static int save_stack_stack(void *data, char *name)
 	return 0;
 }
 
-static void
+static int
 __save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
 {
 	struct stack_trace *trace = data;
 #ifdef CONFIG_FRAME_POINTER
 	if (!reliable)
-		return;
+		return 0;
 #endif
 	if (nosched && in_sched_functions(addr))
-		return;
+		return 0;
 	if (trace->skip > 0) {
 		trace->skip--;
-		return;
+		return 0;
 	}
-	if (trace->nr_entries < trace->max_entries)
+	if (trace->nr_entries < trace->max_entries) {
 		trace->entries[trace->nr_entries++] = addr;
+		return 0;
+	} else {
+		return -1; /* no more room, stop walking the stack */
+	}
 }
 
-static void save_stack_address(void *data, unsigned long addr, int reliable)
+static int save_stack_address(void *data, unsigned long addr, int reliable)
 {
 	return __save_stack_address(data, addr, reliable, false);
 }
 
-static void
+static int
 save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 {
 	return __save_stack_address(data, addr, reliable, true);
@@ -23,12 +23,13 @@ static int backtrace_stack(void *data, char *name)
 	return 0;
 }
 
-static void backtrace_address(void *data, unsigned long addr, int reliable)
+static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
 	unsigned int *depth = data;
 
 	if ((*depth)--)
 		oprofile_add_trace(addr);
+	return 0;
 }
 
 static struct stacktrace_ops backtrace_ops = {
@@ -964,11 +964,20 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
 extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
 extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern struct perf_callchain_entry *
+get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+		   bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
-static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
+	if (entry->nr < PERF_MAX_STACK_DEPTH) {
 		entry->ip[entry->nr++] = ip;
+		return 0;
+	} else {
+		return -1; /* no more room, stop walking the stack */
+	}
 }
 
 extern int sysctl_perf_event_paranoid;
@@ -159,15 +159,24 @@ put_callchain_entry(int rctx)
 struct perf_callchain_entry *
 perf_callchain(struct perf_event *event, struct pt_regs *regs)
 {
-	int rctx;
-	struct perf_callchain_entry *entry;
-
-	int kernel = !event->attr.exclude_callchain_kernel;
-	int user   = !event->attr.exclude_callchain_user;
+	bool kernel = !event->attr.exclude_callchain_kernel;
+	bool user   = !event->attr.exclude_callchain_user;
+	/* Disallow cross-task user callchains. */
+	bool crosstask = event->ctx->task && event->ctx->task != current;
 
 	if (!kernel && !user)
 		return NULL;
 
+	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+}
+
+struct perf_callchain_entry *
+get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+		   bool crosstask, bool add_mark)
+{
+	struct perf_callchain_entry *entry;
+	int rctx;
+
 	entry = get_callchain_entry(&rctx);
 	if (rctx == -1)
 		return NULL;
@@ -175,10 +184,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!entry)
 		goto exit_put;
 
-	entry->nr = 0;
+	entry->nr = init_nr;
 
 	if (kernel && !user_mode(regs)) {
-		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+		if (add_mark)
+			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 		perf_callchain_kernel(entry, regs);
 	}
 
@@ -191,13 +201,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 		}
 
 		if (regs) {
-			/*
-			 * Disallow cross-task user callchains.
-			 */
-			if (event->ctx->task && event->ctx->task != current)
+			if (crosstask)
 				goto exit_put;
 
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			if (add_mark)
+				perf_callchain_store(entry, PERF_CONTEXT_USER);
 			perf_callchain_user(entry, regs);
 		}
 	}
@@ -182,8 +182,6 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
 /* Callchain handling */
 extern struct perf_callchain_entry *
 perf_callchain(struct perf_event *event, struct pt_regs *regs);
-extern int get_callchain_buffers(void);
-extern void put_callchain_buffers(void);
 
 static inline int get_recursion_context(int *recursion)
 {
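To illustrate the commit message's first point outside of kernel context, here is a self-contained, illustrative sketch (not kernel code; all names are invented) of the convention the diff establishes: the store helper returns -1 once the buffer is full, and the walker stops as soon as a callback returns non-zero instead of walking the rest of the stack:

#include <stdio.h>

#define MAX_DEPTH 4

struct entry { unsigned long ip[MAX_DEPTH]; unsigned int nr; };

/* Mirrors the new perf_callchain_store() contract: 0 on success,
 * -1 once there is no more room. */
static int store(struct entry *e, unsigned long ip)
{
	if (e->nr < MAX_DEPTH) {
		e->ip[e->nr++] = ip;
		return 0;
	}
	return -1;
}

/* Mirrors the new walker contract: stop as soon as the address
 * callback reports a full buffer. */
static void walk(const unsigned long *frames, int n, struct entry *e)
{
	for (int i = 0; i < n; i++)
		if (store(e, frames[i]))
			break;	/* no room left: stop walking */
}

int main(void)
{
	unsigned long frames[] = { 0x100, 0x104, 0x108, 0x10c, 0x110, 0x114 };
	struct entry e = { .nr = 0 };

	walk(frames, 6, &e);
	printf("stored %u of 6 frames\n", e.nr);	/* prints: stored 4 of 6 frames */
	return 0;
}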