trace_hardirqs_on/off() is only partially safe vs. RCU idle. The tracer core itself is safe, but the resulting tracepoints can be utilized by e.g. BPF, which is unsafe.

Provide variants which do not contain the lockdep invocation so the lockdep and tracer invocations can be split at the call site and placed properly. This is required because lockdep needs to be aware of the state before switching away from RCU idle and after switching to RCU idle, because these transitions can take locks.

As these code paths are going to be non-instrumentable, the tracer can be invoked after RCU is turned on and before the switch to RCU idle. So for these new variants there is no need to invoke the rcuidle-aware tracer functions.

Name them so they match the lockdep counterparts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200505134100.270771162@linutronix.de
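
A rough sketch of the intended call site split on entry from an RCU idle
context, where my_switch_rcu_on() stands in for whatever actually performs
the RCU idle to RCU watching transition (that helper name is illustrative,
not part of this patch):

	lockdep_hardirqs_off(CALLER_ADDR0);	/* lockdep: safe while RCU is idle */
	my_switch_rcu_on();			/* hypothetical: RCU starts watching */
	trace_hardirqs_off_prepare();		/* tracepoint: needs RCU watching (e.g. BPF) */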
// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
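
/*
 * Illustrative call site ordering on the way back to RCU idle: the tracer
 * part above runs while RCU is still watching, and the lockdep part is
 * issued separately around the actual transition. my_switch_to_rcuidle()
 * is a hypothetical stand-in for whatever turns RCU off:
 *
 *	trace_hardirqs_on_prepare();		tracepoint: RCU still watching
 *	my_switch_to_rcuidle();			hypothetical: RCU stops watching
 *	lockdep_hardirqs_on(CALLER_ADDR0);	lockdep: safe vs. RCU idle
 */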

void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_prepare(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_off_prepare);

void trace_hardirqs_off(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif