eac2f3059e
As [1] and [2] point out, arch_stack_walk() should not trace itself, or it will
leave unexpected entries in the trace when called. For example, when we do "cat
/sys/kernel/debug/page_owner", every page reports the same stack:
arch_stack_walk+0x18/0x20
stack_trace_save+0x40/0x60
register_dummy_stack+0x24/0x5e
init_page_owner+0x2e
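
That trace is recorded by roughly the following path in mm/page_owner.c (a
simplified sketch; helper names and array sizes may differ between kernel
versions):

  static depot_stack_handle_t create_dummy_stack(void)
  {
          unsigned long entries[4];
          unsigned int nr_entries;

          /* stack_trace_save() -> arch_stack_walk() on RISC-V */
          nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
          return stack_depot_save(entries, nr_entries, GFP_KERNEL);
  }

Before this fix, the first entry recorded here was arch_stack_walk() itself.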
So use __builtin_frame_address(1) as the first frame to be walked, and mark
arch_stack_walk() noinline.
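
For reference, the frame/return-address builtins behave like this (an
illustrative sketch only; GCC notes that a nonzero level passed to
__builtin_frame_address() is not guaranteed to work on every target):

  noinline void unwind_entry_point(void)
  {
          void *own_fp    = __builtin_frame_address(0);  /* this function's frame */
          void *caller_fp = __builtin_frame_address(1);  /* the caller's frame */
          void *ret_pc    = __builtin_return_address(0); /* PC back in the caller */

          /*
           * Seeding the walk with (caller_fp, ret_pc) keeps this function
           * out of the trace it produces.
           */
  }

unwind_entry_point() is a made-up name; in this patch the seeding happens in
walk_stackframe() when task == current.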
We also found that pr_cont affects the dumped stacks of tasks whose state is
RUNNING when testing "echo t > /proc/sysrq-trigger", so move the pr_cont call
and mark dump_backtrace() noinline.
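
With this change the "Call Trace:" header is printed by show_stack() before
the walk begins (the resulting function appears in the file below):

  void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
  {
          pr_cont("%sCall Trace:\n", loglvl);
          dump_backtrace(NULL, task, loglvl);
  }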
Also move the task == NULL case into the else branch, and test it with
"echo c > /proc/sysrq-trigger".
[1] https://lore.kernel.org/lkml/20210319184106.5688-1-mark.rutland@arm.com/
[2] https://lore.kernel.org/lkml/20210317142050.57712-1-chenjun102@huawei.com/
Signed-off-by: Chen Huang <chenhuang5@huawei.com>
Fixes: 5d8544e2d0 ("RISC-V: Generic library routines and assembly")
Cc: stable@vger.kernel.org
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#include <asm/stacktrace.h>
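
/*
 * asm/stacktrace.h provides struct stackframe, the { fp, ra } pair that sits
 * just below a callee's frame pointer when the kernel is built with frame
 * pointers; the unwinder below reads it via (struct stackframe *)fp - 1.
 */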
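/* Current stack pointer; the frame-pointer-less unwinder starts its scan here. */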
register unsigned long sp_in_global __asm__("sp");

#ifdef CONFIG_FRAME_POINTER

void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                             bool (*fn)(void *, unsigned long), void *arg)
{
        unsigned long fp, sp, pc;

        if (regs) {
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == current) {
                fp = (unsigned long)__builtin_frame_address(1);
                sp = (unsigned long)__builtin_frame_address(0);
                pc = (unsigned long)__builtin_return_address(0);
        } else {
                /* task blocked in __switch_to */
                fp = task->thread.s[0];
                sp = task->thread.sp;
                pc = task->thread.ra;
        }

        for (;;) {
                unsigned long low, high;
                struct stackframe *frame;

                if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
                        break;

                /* Validate frame pointer */
                low = sp + sizeof(struct stackframe);
                high = ALIGN(sp, THREAD_SIZE);
                if (unlikely(fp < low || fp > high || fp & 0x7))
                        break;
                /* Unwind stack frame */
                frame = (struct stackframe *)fp - 1;
                sp = fp;
                if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
                        fp = frame->ra;
                        pc = regs->ra;
                } else {
                        fp = frame->fp;
                        pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
                                                   (unsigned long *)(fp - 8));
                }

        }
}

#else /* !CONFIG_FRAME_POINTER */

void notrace walk_stackframe(struct task_struct *task,
        struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
        unsigned long sp, pc;
        unsigned long *ksp;

        if (regs) {
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                sp = sp_in_global;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
                sp = task->thread.sp;
                pc = task->thread.ra;
        }

        if (unlikely(sp & 0x7))
                return;

        ksp = (unsigned long *)sp;
        while (!kstack_end(ksp)) {
                if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
                        break;
                pc = (*ksp++) - 0x4;
        }
}

#endif /* CONFIG_FRAME_POINTER */

static bool print_trace_address(void *arg, unsigned long pc)
{
        const char *loglvl = arg;

        print_ip_sym(loglvl, pc);
        return true;
}

noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
                             const char *loglvl)
{
        walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
        pr_cont("%sCall Trace:\n", loglvl);
        dump_backtrace(NULL, task, loglvl);
}

static bool save_wchan(void *arg, unsigned long pc)
{
        if (!in_sched_functions(pc)) {
                unsigned long *p = arg;
                *p = pc;
                return false;
        }
        return true;
}

unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;

        if (likely(task && task != current && task->state != TASK_RUNNING))
                walk_stackframe(task, NULL, save_wchan, &pc);
        return pc;
}

#ifdef CONFIG_STACKTRACE

noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                              struct task_struct *task, struct pt_regs *regs)
{
        walk_stackframe(task, regs, consume_entry, cookie);
}

#endif /* CONFIG_STACKTRACE */
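
As a quick sanity check (not part of this file), a trace captured through the
generic stacktrace API should no longer begin at arch_stack_walk(). A minimal
sketch, assuming it is called from some kernel context such as a test module;
check_unwinder() is a made-up name:

  #include <linux/kernel.h>
  #include <linux/stacktrace.h>

  static void check_unwinder(void)
  {
          unsigned long entries[16];
          unsigned int nr;

          nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
          /* With the fix, arch_stack_walk() itself does not show up here. */
          stack_trace_print(entries, nr, 0);
  }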