kernel/watchdog.c: perform all-CPU backtrace in case of hard lockup
In many cases of hardlockup reports, it's actually not possible to know why it triggered, because the CPU that got stuck is usually waiting on a resource (with IRQs disabled) held by some other CPU.

IOW, we are often looking at the stacktrace of the victim and not the actual offender.

Introduce a sysctl / cmdline parameter that makes it possible to have the hardlockup detector perform an all-CPU backtrace.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Aaron Tomlin <atomlin@redhat.com>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Acked-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 55537871ef
parent ee7fed5405
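For reference, a minimal usage sketch (not part of the patch): the knob added here appears as /proc/sys/kernel/hardlockup_all_cpu_backtrace through the kern_table entry below, and the same setting is available at boot as hardlockup_all_cpu_backtrace=1. The snippet assumes root privileges and a kernel built with CONFIG_HARDLOCKUP_DETECTOR on SMP.

/*
 * Usage sketch only, not part of the patch: enable the new knob from
 * userspace by writing to its procfs sysctl file. The equivalent
 * boot-time setting is "hardlockup_all_cpu_backtrace=1".
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/hardlockup_all_cpu_backtrace", "w");

	if (!f) {
		perror("hardlockup_all_cpu_backtrace");
		return 1;
	}
	fputs("1\n", f);	/* 0 = default (off), 1 = dump all CPUs on hard lockup */
	return fclose(f) ? 1 : 0;
}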
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
@@ -1269,6 +1269,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 			Default: 1024
 
+	hardlockup_all_cpu_backtrace=
+			[KNL] Should the hard-lockup detector generate
+			backtraces on all cpus.
+			Format: <integer>
+
 	hashdist=	[KNL,NUMA] Large hashes allocated during boot
 			are distributed across NUMA nodes.  Defaults on
 			for 64-bit NUMA, off otherwise.
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
@@ -33,6 +33,7 @@ show up in /proc/sys/kernel:
 - domainname
 - hostname
 - hotplug
+- hardlockup_all_cpu_backtrace
 - hung_task_panic
 - hung_task_check_count
 - hung_task_timeout_secs
@@ -292,6 +293,17 @@ Information Service) or YP (Yellow Pages) domainname. These two
 domain names are in general different. For a detailed discussion
 see the hostname(1) man page.
 
+==============================================================
+hardlockup_all_cpu_backtrace:
+
+This value controls the hard lockup detector behavior when a hard
+lockup condition is detected as to whether or not to gather further
+debug information. If enabled, arch-specific all-CPU stack dumping
+will be initiated.
+
+0: do nothing. This is the default behavior.
+
+1: on detection capture more debug information.
 ==============================================================
 
 hotplug:
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
@@ -73,6 +73,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long *watchdog_cpumask_bits;
 extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
 			 void __user *, size_t *, loff_t *);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
@@ -898,6 +898,15 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+	{
+		.procname	= "hardlockup_all_cpu_backtrace",
+		.data		= &sysctl_hardlockup_all_cpu_backtrace,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 #endif /* CONFIG_SMP */
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
@@ -57,8 +57,10 @@ int __read_mostly watchdog_thresh = 10;
 
 #ifdef CONFIG_SMP
 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
 #else
 #define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
 #endif
 static struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -112,6 +114,7 @@ static unsigned long soft_lockup_nmi_warn;
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static int hardlockup_panic =
 			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+static unsigned long hardlockup_allcpu_dumped;
 /*
  * We may not want to enable hard lockup detection by default in all cases,
  * for example when running the kernel as a guest on a hypervisor. In these
@@ -173,6 +176,13 @@ static int __init softlockup_all_cpu_backtrace_setup(char *str)
 	return 1;
 }
 __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
+static int __init hardlockup_all_cpu_backtrace_setup(char *str)
+{
+	sysctl_hardlockup_all_cpu_backtrace =
+			!!simple_strtol(str, NULL, 0);
+	return 1;
+}
+__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
 #endif
 
 /*
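Aside (illustration only, not from the patch): the __setup() handler above normalizes its argument with !!simple_strtol(str, NULL, 0). A userspace analogue using strtol() shows the same idiom: base 0 accepts decimal, octal, and hex input, and the double negation collapses any non-zero value to 1.

/*
 * Illustration only: mirrors the "!!simple_strtol(str, NULL, 0)" idiom
 * used in hardlockup_all_cpu_backtrace_setup() above, with userspace
 * strtol(). Any non-zero numeric argument becomes 1; zero or an
 * unparsable string becomes 0.
 */
#include <stdio.h>
#include <stdlib.h>

static int parse_bool_param(const char *str)
{
	return !!strtol(str, NULL, 0);
}

int main(void)
{
	printf("%d %d %d %d\n",
	       parse_bool_param("1"),		/* 1 */
	       parse_bool_param("0x10"),	/* 1 */
	       parse_bool_param("0"),		/* 0 */
	       parse_bool_param("foo"));	/* 0 */
	return 0;
}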
@@ -318,17 +328,30 @@ static void watchdog_overflow_callback(struct perf_event *event,
 	 */
 	if (is_hardlockup()) {
 		int this_cpu = smp_processor_id();
+		struct pt_regs *regs = get_irq_regs();
 
 		/* only print hardlockups once */
 		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
-		if (hardlockup_panic)
-			panic("Watchdog detected hard LOCKUP on cpu %d",
-			      this_cpu);
+		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+		print_modules();
+		print_irqtrace_events(current);
+		if (regs)
+			show_regs(regs);
 		else
-			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
-			     this_cpu);
+			dump_stack();
+
+		/*
+		 * Perform all-CPU dump only once to avoid multiple hardlockups
+		 * generating interleaving traces
+		 */
+		if (sysctl_hardlockup_all_cpu_backtrace &&
+				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
+			trigger_allbutself_cpu_backtrace();
+
+		if (hardlockup_panic)
+			panic("Hard LOCKUP");
 
 		__this_cpu_write(hard_watchdog_warn, true);
 		return;
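Aside (illustration only, not from the patch): the hunk above gates the expensive all-CPU dump with test_and_set_bit(0, &hardlockup_allcpu_dumped), which atomically sets the bit and returns its previous value, so only the first hardlockup report ever calls trigger_allbutself_cpu_backtrace() and later reports cannot interleave their traces. A userspace sketch of the same first-caller-wins pattern, using a C11 atomic_flag:

/*
 * Illustration only: a userspace analogue of the once-only gate above.
 * atomic_flag_test_and_set() atomically sets the flag and returns its
 * previous value, so only the first caller performs the dump.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag dumped = ATOMIC_FLAG_INIT;

static void dump_all_cpus_once(void)
{
	if (!atomic_flag_test_and_set(&dumped))
		puts("dumping backtraces on all other CPUs (first lockup only)");
}

int main(void)
{
	dump_all_cpus_once();	/* performs the dump */
	dump_all_cpus_once();	/* skipped: the flag is already set */
	return 0;
}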