/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
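
/*
 * This layout mirrors the AAPCS64 frame record that compilers emit when
 * frame pointers are enabled: x29 points at a two-word record on the
 * stack, and each record links to the caller's record:
 *
 *	x29 -> +------------------+
 *	       | fp (saved x29)   |  --> caller's frame record
 *	       +------------------+
 *	       | lr (saved x30)   |  --> return address in the caller
 *	       +------------------+
 */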

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}
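
/*
 * Purely illustrative and never compiled here: a user-space sketch of
 * the walk user_backtrace() performs, reading the calling process's own
 * frame records. It assumes a build with frame pointers enabled
 * (-fno-omit-frame-pointer); frame_record and dump_user_stack are
 * invented names for this sketch:
 *
 *	#include <stdio.h>
 *
 *	struct frame_record { struct frame_record *fp; unsigned long lr; };
 *
 *	void dump_user_stack(void)
 *	{
 *		struct frame_record *fr = __builtin_frame_address(0);
 *
 *		while (fr && !((unsigned long)fr & 0xf)) {
 *			printf("return address: %#lx\n", fr->lr);
 *			if (fr >= fr->fp)
 *				break;	// records must move up the stack
 *			fr = fr->fp;
 *		}
 *	}
 */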

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry_ctx *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */
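
/*
 * To make the pointer arithmetic above concrete: because the AArch32
 * frame pointer points just past the saved {fp, sp, lr} words, the
 * record itself occupies the 12 bytes below it, hence the "- 1" before
 * each dereference:
 *
 *	fp - 12 -> +-----+
 *	           | fp  |
 *	           +-----+
 *	           | sp  |
 *	           +-----+
 *	           | lr  |
 *	fp ------> +-----+
 */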

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

		while (entry->nr < entry->max_stack &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

		while ((entry->nr < entry->max_stack) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}
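
/*
 * Purely illustrative and never compiled here: a minimal user-space
 * sketch of requesting these callchains through the perf syscall. Error
 * handling is omitted, and the event type, period and stack cap are
 * arbitrary choices for the example, not requirements:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(struct perf_event_attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_period	= 100000,
 *		.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN,
 *		.sample_max_stack = 64,	// bounds entry->max_stack above
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */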

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry_ctx *entry = data;
	perf_callchain_store(entry, frame->pc);
	return 0;
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	frame.fp = regs->regs[29];
	frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = current->curr_ret_stack;
#endif

	walk_stackframe(current, &frame, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}