sh: oprofile: Backtrace support.
This patch improves the oprofile support on sh and adds backtrace support.

Signed-off-by: Dave Peverley <dpeverley@mpc-data.co.uk>
Signed-off-by: Chris Smith <chris.smith@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit 40a8b421b6
parent 60a51fbe5d
arch/sh/oprofile/backtrace.c | 118 lines (new file)
@@ -0,0 +1,118 @@
/*
 * SH specific backtracing code for oprofile
 *
 * Copyright 2007 STMicroelectronics Ltd.
 *
 * Author: Dave Peverley <dpeverley@mpc-data.co.uk>
 *
 * Based on ARM oprofile backtrace code by Richard Purdie and in turn, i386
 * oprofile backtrace code by John Levon, David Smith
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/sections.h>

/* Limit to stop backtracing too far. */
static int backtrace_limit = 20;

static unsigned long *
user_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
{
	unsigned long buf_stack;

	/* Also check accessibility of address */
	if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long)))
		return NULL;

	if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long)))
		return NULL;

	/* Quick paranoia check */
	if (buf_stack & 3)
		return NULL;

	oprofile_add_trace(buf_stack);

	stackaddr++;

	return stackaddr;
}

/*
 *  |             | /\ Higher addresses
 *  |             |
 *  --------------- stack base (address of current_thread_info)
 *  | thread info |
 *  .             .
 *  |    stack    |
 *  --------------- saved regs->regs[15] value if valid
 *  .             .
 *  --------------- struct pt_regs stored on stack (struct pt_regs *)
 *  |             |
 *  .             .
 *  |             |
 *  --------------- ???
 *  |             |
 *  |             | \/ Lower addresses
 *
 * Thus, &pt_regs <-> stack base restricts the valid(ish) fp values
 */
static int valid_kernel_stack(unsigned long *stackaddr, struct pt_regs *regs)
{
	unsigned long stack = (unsigned long)regs;
	unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;

	return ((unsigned long)stackaddr > stack) && ((unsigned long)stackaddr < stack_base);
}

static unsigned long *
kernel_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
{
	unsigned long addr;

	/*
	 * If not a valid kernel address, keep going till we find one
	 * or the SP stops being a valid address.
	 */
	do {
		addr = *stackaddr++;

		if (__kernel_text_address(addr)) {
			oprofile_add_trace(addr);
			break;
		}
	} while (valid_kernel_stack(stackaddr, regs));

	return stackaddr;
}

void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long *stackaddr;

	/*
	 * Paranoia - clip max depth as we could get lost in the weeds.
	 */
	if (depth > backtrace_limit)
		depth = backtrace_limit;

	stackaddr = (unsigned long *)regs->regs[15];
	if (!user_mode(regs)) {
		while (depth-- && valid_kernel_stack(stackaddr, regs))
			stackaddr = kernel_backtrace(stackaddr, regs);

		return;
	}

	while (depth-- && (stackaddr != NULL))
		stackaddr = user_backtrace(stackaddr, regs);
}
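Not part of the patch, but a note on the walk above: valid_kernel_stack() only accepts stack slots that lie above the saved struct pt_regs and below the top of the current THREAD_SIZE-aligned stack, and sh_backtrace() steps through those slots one word at a time under a depth cap. A minimal user-space sketch of that bounded-walk pattern follows; every name and the fake stack here are purely illustrative, not kernel API.

#include <stdio.h>

#define FAKE_STACK_WORDS 16

/* Mirrors the "above the saved regs, below the stack base" bounds check. */
static int valid_slot(unsigned long *slot, unsigned long *lo, unsigned long *hi)
{
	return slot > lo && slot < hi;
}

int main(void)
{
	unsigned long stack[FAKE_STACK_WORDS] = { 0, 0x11, 0x22, 0x33, 0x44 };
	unsigned long *slot = &stack[1];		/* stands in for regs->regs[15] */
	unsigned long *lo = &stack[0];			/* stands in for (unsigned long)regs */
	unsigned long *hi = &stack[FAKE_STACK_WORDS];	/* stands in for stack_base */
	unsigned int depth = 3;				/* stands in for the backtrace_limit clip */

	/* Walk word by word, stopping at the depth cap or the stack bounds. */
	while (depth-- && valid_slot(slot, lo, hi))
		printf("trace entry: %#lx\n", *slot++);

	return 0;
}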
@@ -27,6 +27,8 @@ static struct op_sh_model *model;

static struct op_counter_config ctr[20];

extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);

static int op_sh_setup(void)
{
	/* Pre-compute the values to stuff in the hardware registers. */
@@ -85,6 +87,13 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
	struct op_sh_model *lmodel = NULL;
	int ret;

	/*
	 * Always assign the backtrace op. If the counter initialization
	 * fails, we fall back to the timer which will still make use of
	 * this.
	 */
	ops->backtrace = sh_backtrace;

	switch (current_cpu_data.type) {
	/* SH-4 types */
	case CPU_SH7750:
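Not part of the patch, but for context on the second hunk: oprofile_arch_init() wires sh_backtrace into the profiler's operations table so the core can call it per sample when callgraph collection is enabled. A self-contained sketch of that register-then-dispatch pattern follows; the struct and function names are invented for illustration and are not the real oprofile interfaces.

#include <stdio.h>

struct fake_regs {
	unsigned long sp;	/* stands in for regs->regs[15] */
};

struct profiler_ops {
	/* Architecture hook, analogous to the backtrace op assigned above. */
	void (*backtrace)(struct fake_regs *regs, unsigned int depth);
};

static void demo_backtrace(struct fake_regs *regs, unsigned int depth)
{
	printf("backtrace from sp=%#lx, depth=%u\n", regs->sp, depth);
}

/* Architecture init assigns the hook, as the patch does unconditionally. */
static void arch_init(struct profiler_ops *ops)
{
	ops->backtrace = demo_backtrace;
}

int main(void)
{
	struct profiler_ops ops = { 0 };
	struct fake_regs regs = { .sp = 0xa0001000 };

	arch_init(&ops);

	/* The core would invoke the hook for each sample; call it once here. */
	if (ops.backtrace)
		ops.backtrace(&regs, 8);

	return 0;
}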