ftrace/sysprof: don't trace the user stack if we are a kernel thread.

Check that current->mm is non-NULL before attempting to trace the user
stack.

Also take the depth of the kernel stack into account when comparing
against sample_max_depth.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 8a9e94c1fb
commit cf3271a73b
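Note on the change (annotation, not part of the original commit text): trace_kernel() now returns the number of trace entries it emitted instead of a fabricated user pt_regs pointer, so the user-stack walk in timer_notify() continues from that depth and the whole sample shares one sample_max_depth budget; the user walk is skipped entirely for kernel threads, which have no mm. Below is a minimal, hedged userspace analogue of such a frame-pointer walk with a shared depth budget; all names, the build flags, and the frame layout assumptions are illustrative, not kernel code.

/*
 * walk.c -- illustrative userspace analogue, NOT kernel code.
 * Demonstrates a frame-pointer chain walk that starts at a depth
 * already consumed by an earlier walk, the way the patched
 * timer_notify() resumes at the depth returned by trace_kernel().
 * Build with:  gcc -O0 -fno-omit-frame-pointer walk.c
 */
#include <stdint.h>
#include <stdio.h>

/* same layout idea as the tracer's struct stack_frame: the saved
 * frame pointer, then the return address pushed by the call */
struct stack_frame {
	struct stack_frame *next_fp;
	uintptr_t return_address;
};

#define SAMPLE_MAX_DEPTH 16

/*
 * Walk the chain starting at 'fp', beginning at depth 'i' (budget
 * already consumed), mirroring the while (i < sample_max_depth) loop
 * in the patch. 'sp' plays the role of regs->sp; 'top' bounds the
 * walk so we never dereference past main()'s frame.
 */
static int walk_frames(struct stack_frame *fp, uintptr_t sp,
		       uintptr_t top, int i)
{
	while (i < SAMPLE_MAX_DEPTH) {
		/* same sanity check as the patch: fp below sp is bogus */
		if ((uintptr_t)fp <= sp || (uintptr_t)fp >= top)
			break;
		printf("depth %2d: return address %p (fp %p)\n",
		       i, (void *)fp->return_address, (void *)fp);
		fp = fp->next_fp;
		i++;
	}
	return i;
}

static __attribute__((noinline)) int leaf(uintptr_t top, int kernel_depth)
{
	int anchor;	/* its address approximates the stack pointer */

	return walk_frames(__builtin_frame_address(0),
			   (uintptr_t)&anchor, top, kernel_depth);
}

static __attribute__((noinline)) int middle(uintptr_t top, int kernel_depth)
{
	return leaf(top, kernel_depth);
}

int main(void)
{
	uintptr_t top = (uintptr_t)__builtin_frame_address(0);

	/* pretend the kernel-stack walk already logged 3 entries */
	int depth = middle(top, 3);

	if (depth == SAMPLE_MAX_DEPTH)
		printf("overflowed the shared depth budget\n");
	else
		printf("sample used %d of %d entries\n",
		       depth, SAMPLE_MAX_DEPTH);
	return 0;
}

Running it prints the frames of middle() and main() starting at depth 3, then stops at the bound, which is the same shape as the patched loop: one counter, consumed first by the kernel walk, then by the user walk.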
kernel/trace/trace_sysprof.c:

@@ -95,13 +95,12 @@ const static struct stacktrace_ops backtrace_ops = {
 	.address		= backtrace_address,
 };
 
-static struct pt_regs *
+static int
 trace_kernel(struct pt_regs *regs, struct trace_array *tr,
 	     struct trace_array_cpu *data)
 {
 	struct backtrace_info info;
 	unsigned long bp;
-	char *user_stack;
 	char *stack;
 
 	info.tr = tr;
@@ -119,10 +118,7 @@ trace_kernel(struct pt_regs *regs, struct trace_array *tr,
 
 	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);
 
-	/* Now trace the user stack */
-	user_stack = ((char *)current->thread.sp0 - sizeof(struct pt_regs));
-
-	return (struct pt_regs *)user_stack;
+	return info.pos;
 }
 
 static void timer_notify(struct pt_regs *regs, int cpu)
@@ -150,32 +146,44 @@ static void timer_notify(struct pt_regs *regs, int cpu)
 	__trace_special(tr, data, 0, 0, current->pid);
 
 	if (!is_user)
-		regs = trace_kernel(regs, tr, data);
+		i = trace_kernel(regs, tr, data);
+	else
+		i = 0;
 
-	fp = (void __user *)regs->bp;
+	/*
+	 * Trace user stack if we are not a kernel thread
+	 */
+	if (current->mm && i < sample_max_depth) {
+		regs = (struct pt_regs *)current->thread.sp0 - 1;
 
-	__trace_special(tr, data, 2, regs->ip, 0);
+		fp = (void __user *)regs->bp;
 
-	for (i = 0; i < sample_max_depth; i++) {
-		frame.next_fp = 0;
-		frame.return_address = 0;
-		if (!copy_stack_frame(fp, &frame))
-			break;
-		if ((unsigned long)fp < regs->sp)
-			break;
+		__trace_special(tr, data, 2, regs->ip, 0);
 
-		__trace_special(tr, data, 2, frame.return_address,
-			(unsigned long)fp);
-		fp = frame.next_fp;
-	}
+		while (i < sample_max_depth) {
+			frame.next_fp = 0;
+			frame.return_address = 0;
+			if (!copy_stack_frame(fp, &frame))
+				break;
+			if ((unsigned long)fp < regs->sp)
+				break;
+
+			__trace_special(tr, data, 2, frame.return_address,
+					(unsigned long)fp);
+			fp = frame.next_fp;
+
+			i++;
+		}
+	}
 
-	__trace_special(tr, data, 3, current->pid, i);
-
 	/*
 	 * Special trace entry if we overflow the max depth:
 	 */
 	if (i == sample_max_depth)
 		__trace_special(tr, data, -1, -1, -1);
 
+	__trace_special(tr, data, 3, current->pid, i);
 }
 
 static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
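For context (not part of this diff): copy_stack_frame(), called in the loop above, is the file's helper for safely fetching one stack_frame from user memory. At the time it read approximately as below; this is reconstructed from the usual kernel idioms of that era (access_ok() plus __copy_from_user_inatomic() under pagefault_disable()), so treat it as a hedged sketch and consult the tree for the authoritative version.

struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	/* reject addresses outside the user range up front */
	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	/* hrtimer (atomic) context: a fault here must not sleep */
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

This is also why the current->mm check matters: a kernel thread has no user mappings at all, so the old code's unconditional user walk, seeded from a pt_regs fabricated off current->thread.sp0, could only emit junk entries; the patched code never starts it.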