2020-07-22 21:59:56 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
|
|
|
|
#include <linux/context_tracking.h>
|
|
|
|
#include <linux/entry-common.h>
|
2022-02-09 18:20:45 +00:00
|
|
|
#include <linux/resume_user_mode.h>
|
2020-11-18 19:48:43 +00:00
|
|
|
#include <linux/highmem.h>
|
sched/preempt: Add PREEMPT_DYNAMIC using static keys
Where an architecture selects HAVE_STATIC_CALL but not
HAVE_STATIC_CALL_INLINE, each static call has an out-of-line trampoline
which will either branch to a callee or return to the caller.
On such architectures, a number of constraints can conspire to make
those trampolines more complicated and potentially less useful than we'd
like. For example:
* Hardware and software control flow integrity schemes can require the
addition of "landing pad" instructions (e.g. `BTI` for arm64), which
will also be present at the "real" callee.
* Limited branch ranges can require that trampolines generate or load an
address into a register and perform an indirect branch (or at least
have a slow path that does so). This loses some of the benefits of
having a direct branch.
* Interaction with SW CFI schemes can be complicated and fragile, e.g.
requiring that we can recognise idiomatic codegen and remove
indirections understand, at least until clang proves more helpful
mechanisms for dealing with this.
For PREEMPT_DYNAMIC, we don't need the full power of static calls, as we
really only need to enable/disable specific preemption functions. We can
achieve the same effect without a number of the pain points above by
using static keys to fold early returns into the preemption functions
themselves rather than in an out-of-line trampoline, effectively
inlining the trampoline into the start of the function.
For arm64, this results in good code generation. For example, the
dynamic_cond_resched() wrapper looks as follows when enabled. When
disabled, the first `B` is replaced with a `NOP`, resulting in an early
return.
| <dynamic_cond_resched>:
| bti c
| b <dynamic_cond_resched+0x10> // or `nop`
| mov w0, #0x0
| ret
| mrs x0, sp_el0
| ldr x0, [x0, #8]
| cbnz x0, <dynamic_cond_resched+0x8>
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
... compared to the regular form of the function:
| <__cond_resched>:
| bti c
| mrs x0, sp_el0
| ldr x1, [x0, #8]
| cbz x1, <__cond_resched+0x18>
| mov w0, #0x0
| ret
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
Any architecture which implements static keys should be able to use this
to implement PREEMPT_DYNAMIC with similar cost to non-inlined static
calls. Since this is likely to have greater overhead than (inlined)
static calls, PREEMPT_DYNAMIC is only defaulted to enabled when
HAVE_PREEMPT_DYNAMIC_CALL is selected.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-6-mark.rutland@arm.com
2022-02-14 16:52:14 +00:00
|
|
|
#include <linux/jump_label.h>
|
2022-09-15 15:04:14 +00:00
|
|
|
#include <linux/kmsan.h>
|
2020-07-22 21:59:57 +00:00
|
|
|
#include <linux/livepatch.h>
|
|
|
|
#include <linux/audit.h>
|
2021-05-27 11:34:41 +00:00
|
|
|
#include <linux/tick.h>
|
2020-07-22 21:59:56 +00:00
|
|
|
|
2020-11-27 19:32:35 +00:00
|
|
|
#include "common.h"
|
|
|
|
|
2020-07-22 21:59:56 +00:00
|
|
|
#define CREATE_TRACE_POINTS
|
|
|
|
#include <trace/events/syscalls.h>
|
|
|
|
|
|
|
|
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
|
|
|
|
{
|
|
|
|
if (unlikely(audit_context())) {
|
|
|
|
unsigned long args[6];
|
|
|
|
|
|
|
|
syscall_get_arguments(current, regs, args);
|
|
|
|
audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-18 07:45:20 +00:00
|
|
|
/*
 * syscall_trace_enter - run syscall entry work (user dispatch, ptrace,
 * seccomp, tracepoints, audit) in the required order.
 *
 * Returns the syscall number to execute (any of the hooks above may have
 * rewritten it), or -1L when the syscall must be aborted (syscall user
 * dispatch took over, ptrace aborted entry, or SYSCALL_EMU is set).
 */
long syscall_trace_enter(struct pt_regs *regs, long syscall,
			 unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch. This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = ptrace_report_syscall_entry(regs);
		/* SYSCALL_EMU means the syscall is emulated, never executed */
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
		trace_sys_enter(regs, syscall);
		/*
		 * Probes or BPF hooks in the tracepoint may have changed the
		 * system call number as well.
		 */
		syscall = syscall_get_nr(current, regs);
	}

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}
|
|
|
|
|
2020-09-01 23:50:54 +00:00
|
|
|
/*
 * Establish kernel context state and reenable interrupts on syscall entry.
 *
 * noinstr: enter_from_user_mode() must run before any instrumentable code,
 * hence the explicit instrumentation_begin()/end() bracket around the
 * interrupt enable.
 */
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}
|
|
|
|
|
2020-07-22 21:59:57 +00:00
|
|
|
/* Workaround to allow gradual conversion of architecture code */
/* __weak no-op default; converted architectures provide their own version. */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
|
2020-07-22 21:59:57 +00:00
|
|
|
|
2023-12-18 07:45:18 +00:00
|
|
|
/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs: Pointer to pt_regs on entry stack
 * @ti_work: TIF work flags as read by the caller
 *
 * Runs each pending work item with interrupts enabled, then rereads the
 * flags with interrupts disabled and repeats until no EXIT_TO_USER_MODE_WORK
 * bits remain.
 *
 * Returns: the final TIF work flags, read with interrupts disabled.
 */
__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
						     unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			arch_do_signal_or_restart(regs);

		if (ti_work & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = read_thread_flags();
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}
|
|
|
|
|
|
|
|
/*
|
2020-11-16 17:42:03 +00:00
|
|
|
* If SYSCALL_EMU is set, then the only reason to report is when
|
2021-02-03 18:00:48 +00:00
|
|
|
* SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
|
2020-09-19 08:09:36 +00:00
|
|
|
* instruction has been already reported in syscall_enter_from_user_mode().
|
2020-07-22 21:59:57 +00:00
|
|
|
*/
|
2020-11-16 17:42:03 +00:00
|
|
|
static inline bool report_single_step(unsigned long work)
|
2020-07-22 21:59:57 +00:00
|
|
|
{
|
2021-01-23 11:21:32 +00:00
|
|
|
if (work & SYSCALL_WORK_SYSCALL_EMU)
|
2020-11-16 17:42:03 +00:00
|
|
|
return false;
|
|
|
|
|
2021-02-03 18:00:48 +00:00
|
|
|
return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
|
2020-07-22 21:59:57 +00:00
|
|
|
}
|
2020-11-16 17:42:05 +00:00
|
|
|
|
|
|
|
/*
 * Run the one-time syscall exit work: audit, the exit tracepoint and the
 * ptrace exit report, unless syscall user dispatch rolled the syscall back.
 */
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	/* Report to ptrace when tracing or when a single-step trap is due */
	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		ptrace_report_syscall_exit(regs, step);
}
|
|
|
|
|
|
|
|
/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	/* Context tracking must still be in kernel state here */
	CT_WARN_ON(ct_state() != CT_STATE_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		/* A syscall must not return with interrupts disabled */
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}
|
|
|
|
|
2020-12-01 14:27:55 +00:00
|
|
|
/*
 * Common body shared by the instrumentable and noinstr syscall exit paths:
 * syscall exit work (interrupts on), then disable interrupts and do the
 * generic exit-to-user preparation.
 */
static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}
|
|
|
|
|
|
|
|
/*
 * Non-noinstr entry point performing only the syscall exit work; unlike
 * syscall_exit_to_user_mode() it does not call exit_to_user_mode().
 */
void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}
|
|
|
|
|
|
|
|
/*
 * Full noinstr syscall exit path: run the (instrumentable) exit work
 * inside an instrumentation_begin()/end() bracket, then leave kernel
 * context via exit_to_user_mode().
 */
__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	exit_to_user_mode();
}
|
|
|
|
|
2020-07-22 21:59:56 +00:00
|
|
|
/* Establish kernel context state for an interrupt taken from user mode. */
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
}
|
2020-07-22 21:59:57 +00:00
|
|
|
|
|
|
|
/*
 * Interrupt exit back to user mode: generic exit work (instrumentable),
 * then leave kernel context via exit_to_user_mode().
 */
noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}
|
2020-07-22 21:59:58 +00:00
|
|
|
|
2020-07-25 09:19:51 +00:00
|
|
|
/*
 * irqentry_enter - common interrupt entry work.
 *
 * The returned state records whether RCU/context-tracking needs to be
 * unwound on exit and must be passed to irqentry_exit().
 */
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke ct_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt to invoke ct_irq_enter(). If that nested interrupt is
	 * the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke ct_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		instrumentation_begin();
		kmsan_unpoison_entry_regs(regs);
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}
|
|
|
|
|
2022-02-14 16:52:12 +00:00
|
|
|
void raw_irqentry_exit_cond_resched(void)
|
2020-07-22 21:59:58 +00:00
|
|
|
{
|
|
|
|
if (!preempt_count()) {
|
|
|
|
/* Sanity check RCU and thread stack */
|
|
|
|
rcu_irq_exit_check_preempt();
|
|
|
|
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
|
|
|
|
WARN_ON_ONCE(!on_thread_stack());
|
|
|
|
if (need_resched())
|
|
|
|
preempt_schedule_irq();
|
|
|
|
}
|
|
|
|
}
|
2021-01-18 14:12:22 +00:00
|
|
|
#ifdef CONFIG_PREEMPT_DYNAMIC
|
sched/preempt: Add PREEMPT_DYNAMIC using static keys
Where an architecture selects HAVE_STATIC_CALL but not
HAVE_STATIC_CALL_INLINE, each static call has an out-of-line trampoline
which will either branch to a callee or return to the caller.
On such architectures, a number of constraints can conspire to make
those trampolines more complicated and potentially less useful than we'd
like. For example:
* Hardware and software control flow integrity schemes can require the
addition of "landing pad" instructions (e.g. `BTI` for arm64), which
will also be present at the "real" callee.
* Limited branch ranges can require that trampolines generate or load an
address into a register and perform an indirect branch (or at least
have a slow path that does so). This loses some of the benefits of
having a direct branch.
* Interaction with SW CFI schemes can be complicated and fragile, e.g.
requiring that we can recognise idiomatic codegen and remove
indirections understand, at least until clang proves more helpful
mechanisms for dealing with this.
For PREEMPT_DYNAMIC, we don't need the full power of static calls, as we
really only need to enable/disable specific preemption functions. We can
achieve the same effect without a number of the pain points above by
using static keys to fold early returns into the preemption functions
themselves rather than in an out-of-line trampoline, effectively
inlining the trampoline into the start of the function.
For arm64, this results in good code generation. For example, the
dynamic_cond_resched() wrapper looks as follows when enabled. When
disabled, the first `B` is replaced with a `NOP`, resulting in an early
return.
| <dynamic_cond_resched>:
| bti c
| b <dynamic_cond_resched+0x10> // or `nop`
| mov w0, #0x0
| ret
| mrs x0, sp_el0
| ldr x0, [x0, #8]
| cbnz x0, <dynamic_cond_resched+0x8>
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
... compared to the regular form of the function:
| <__cond_resched>:
| bti c
| mrs x0, sp_el0
| ldr x1, [x0, #8]
| cbz x1, <__cond_resched+0x18>
| mov w0, #0x0
| ret
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
Any architecture which implements static keys should be able to use this
to implement PREEMPT_DYNAMIC with similar cost to non-inlined static
calls. Since this is likely to have greater overhead than (inlined)
static calls, PREEMPT_DYNAMIC is only defaulted to enabled when
HAVE_PREEMPT_DYNAMIC_CALL is selected.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-6-mark.rutland@arm.com
2022-02-14 16:52:14 +00:00
|
|
|
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
|
2022-02-14 16:52:12 +00:00
|
|
|
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
|
sched/preempt: Add PREEMPT_DYNAMIC using static keys
Where an architecture selects HAVE_STATIC_CALL but not
HAVE_STATIC_CALL_INLINE, each static call has an out-of-line trampoline
which will either branch to a callee or return to the caller.
On such architectures, a number of constraints can conspire to make
those trampolines more complicated and potentially less useful than we'd
like. For example:
* Hardware and software control flow integrity schemes can require the
addition of "landing pad" instructions (e.g. `BTI` for arm64), which
will also be present at the "real" callee.
* Limited branch ranges can require that trampolines generate or load an
address into a register and perform an indirect branch (or at least
have a slow path that does so). This loses some of the benefits of
having a direct branch.
* Interaction with SW CFI schemes can be complicated and fragile, e.g.
requiring that we can recognise idiomatic codegen and remove
indirections understand, at least until clang proves more helpful
mechanisms for dealing with this.
For PREEMPT_DYNAMIC, we don't need the full power of static calls, as we
really only need to enable/disable specific preemption functions. We can
achieve the same effect without a number of the pain points above by
using static keys to fold early returns into the preemption functions
themselves rather than in an out-of-line trampoline, effectively
inlining the trampoline into the start of the function.
For arm64, this results in good code generation. For example, the
dynamic_cond_resched() wrapper looks as follows when enabled. When
disabled, the first `B` is replaced with a `NOP`, resulting in an early
return.
| <dynamic_cond_resched>:
| bti c
| b <dynamic_cond_resched+0x10> // or `nop`
| mov w0, #0x0
| ret
| mrs x0, sp_el0
| ldr x0, [x0, #8]
| cbnz x0, <dynamic_cond_resched+0x8>
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
... compared to the regular form of the function:
| <__cond_resched>:
| bti c
| mrs x0, sp_el0
| ldr x1, [x0, #8]
| cbz x1, <__cond_resched+0x18>
| mov w0, #0x0
| ret
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
Any architecture which implements static keys should be able to use this
to implement PREEMPT_DYNAMIC with similar cost to non-inlined static
calls. Since this is likely to have greater overhead than (inlined)
static calls, PREEMPT_DYNAMIC is only defaulted to enabled when
HAVE_PREEMPT_DYNAMIC_CALL is selected.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-6-mark.rutland@arm.com
2022-02-14 16:52:14 +00:00
|
|
|
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
|
|
|
|
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
|
|
|
|
void dynamic_irqentry_exit_cond_resched(void)
|
|
|
|
{
|
2022-03-30 08:43:28 +00:00
|
|
|
if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
|
sched/preempt: Add PREEMPT_DYNAMIC using static keys
Where an architecture selects HAVE_STATIC_CALL but not
HAVE_STATIC_CALL_INLINE, each static call has an out-of-line trampoline
which will either branch to a callee or return to the caller.
On such architectures, a number of constraints can conspire to make
those trampolines more complicated and potentially less useful than we'd
like. For example:
* Hardware and software control flow integrity schemes can require the
addition of "landing pad" instructions (e.g. `BTI` for arm64), which
will also be present at the "real" callee.
* Limited branch ranges can require that trampolines generate or load an
address into a register and perform an indirect branch (or at least
have a slow path that does so). This loses some of the benefits of
having a direct branch.
* Interaction with SW CFI schemes can be complicated and fragile, e.g.
requiring that we can recognise idiomatic codegen and remove
indirections understand, at least until clang proves more helpful
mechanisms for dealing with this.
For PREEMPT_DYNAMIC, we don't need the full power of static calls, as we
really only need to enable/disable specific preemption functions. We can
achieve the same effect without a number of the pain points above by
using static keys to fold early returns into the preemption functions
themselves rather than in an out-of-line trampoline, effectively
inlining the trampoline into the start of the function.
For arm64, this results in good code generation. For example, the
dynamic_cond_resched() wrapper looks as follows when enabled. When
disabled, the first `B` is replaced with a `NOP`, resulting in an early
return.
| <dynamic_cond_resched>:
| bti c
| b <dynamic_cond_resched+0x10> // or `nop`
| mov w0, #0x0
| ret
| mrs x0, sp_el0
| ldr x0, [x0, #8]
| cbnz x0, <dynamic_cond_resched+0x8>
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
... compared to the regular form of the function:
| <__cond_resched>:
| bti c
| mrs x0, sp_el0
| ldr x1, [x0, #8]
| cbz x1, <__cond_resched+0x18>
| mov w0, #0x0
| ret
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
Any architecture which implements static keys should be able to use this
to implement PREEMPT_DYNAMIC with similar cost to non-inlined static
calls. Since this is likely to have greater overhead than (inlined)
static calls, PREEMPT_DYNAMIC is only defaulted to enabled when
HAVE_PREEMPT_DYNAMIC_CALL is selected.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-6-mark.rutland@arm.com
2022-02-14 16:52:14 +00:00
|
|
|
return;
|
|
|
|
raw_irqentry_exit_cond_resched();
|
|
|
|
}
|
|
|
|
#endif
|
2021-01-18 14:12:22 +00:00
|
|
|
#endif
|
2020-07-22 21:59:58 +00:00
|
|
|
|
2020-07-25 09:19:51 +00:00
|
|
|
/*
 * irqentry_exit - common interrupt exit work.
 * @state: the state returned by the matching irqentry_enter() call.
 *
 * Must be called with interrupts disabled; undoes RCU/lockdep state in the
 * order mirroring irqentry_enter().
 */
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			instrumentation_end();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();

		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			ct_irq_exit();
	}
}
|
2020-11-02 20:53:16 +00:00
|
|
|
|
|
|
|
/*
 * NMI entry: save the lockdep interrupt state, enter NMI context and
 * notify context tracking, lockdep, KMSAN and ftrace in the required
 * order. The returned state must be passed to irqentry_nmi_exit().
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	/* Remember the pre-NMI lockdep irq state so exit can restore it */
	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}
|
|
|
|
|
|
|
|
/*
 * NMI exit: unwind irqentry_nmi_enter() in reverse order, restoring the
 * saved lockdep interrupt state.
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	/* Only re-announce hardirqs-on if they were on before the NMI hit */
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}
	instrumentation_end();

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
|