/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/exception.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H

#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

#include <linux/interrupt.h>
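
/*
 * __exception_irq_entry marks the low-level IRQ handlers. When the function
 * graph tracer is enabled they are placed in .irqentry.text (__irq_entry) so
 * the tracer can recognise IRQ entry points; otherwise they are only excluded
 * from kprobing (__kprobes).
 */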
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry	__irq_entry
#else
#define __exception_irq_entry	__kprobes
#endif
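
/*
 * Convert a deferred SError syndrome reported in DISR_EL1 into an
 * ESR_ELx-format syndrome with the exception class set to SError.
 */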
static inline u32 disr_to_esr(u64 disr)
{
	unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	if ((disr & DISR_EL1_IDS) == 0)
		esr |= (disr & DISR_EL1_ESR_MASK);
	else
		esr |= (disr & ESR_ELx_ISS_MASK);

	return esr;
}

arm64: entry: handle all vectors with C
We have 16 architectural exception vectors, and depending on kernel
configuration we handle 8 or 12 of these with C code, with the remaining
8 or 4 of these handled as special cases in the entry assembly.
It would be nicer if the entry assembly were uniform for all exceptions,
and we deferred any specific handling of the exceptions to C code. This
way the entry assembly can be more easily templated without ifdeffery or
special cases, and it's easier to modify the handling of these cases in
future (e.g. to dump additional registers or other context).
This patch reworks the entry code so that we always have a C handler for
every architectural exception vector, with the entry assembly being
completely uniform. We now have to handle exceptions from EL1t and EL1h,
and also have to handle exceptions from AArch32 even when the kernel is
built without CONFIG_COMPAT. To make this clear and to simplify
templating, we rename the top-level exception handlers with a consistent
naming scheme:
asm: <el+sp>_<regsize>_<type>
c: <el+sp>_<regsize>_<type>_handler
... where:
<el+sp> is `el1t`, `el1h`, or `el0t`
<regsize> is `64` or `32`
<type> is `sync`, `irq`, `fiq`, or `error`
... e.g.
asm: el1h_64_sync
c: el1h_64_sync_handler
... with lower-level handlers simply using "el1" and "compat" as today.
For unexpected exceptions, this information is passed to
__panic_unhandled(), so it can report the specific vector an unexpected
exception was taken from, e.g.
| Unhandled 64-bit el1t sync exception
For vectors we never expect to enter legitimately, the C code is
generated using a macro to avoid code duplication. The exceptions are
handled via __panic_unhandled(), replacing bad_mode() (which is
removed).
The `kernel_ventry` and `entry_handler` assembly macros are updated to
handle the new naming scheme. In theory it should be possible to
generate the entry functions at the same time as the vectors using a
single table, but this will require reworking the linker script to split
the two into separate sections, so for now we have separate tables.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-15-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
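
/*
 * Illustrative sketch (not necessarily the kernel's exact code) of the
 * macro-generated handlers for vectors we never expect to take, as described
 * above: token pasting builds the <el+sp>_<regsize>_<type>_handler name, and
 * the body reports the vector via an assumed __panic_unhandled(regs, desc, esr)
 * helper.
 */
#define UNHANDLED(el, regsize, vector)						\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \
{										\
	const char *desc = #regsize "-bit " #el " " #vector;			\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));			\
}
/* e.g. UNHANDLED(el1t, 64, sync) defines el1t_64_sync_handler(). */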

asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_error_handler(struct pt_regs *regs);

asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_error_handler(struct pt_regs *regs);

asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_error_handler(struct pt_regs *regs);

asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_error_handler(struct pt_regs *regs);

asmlinkage void call_on_irq_stack(struct pt_regs *regs,
				  void (*func)(struct pt_regs *));
asmlinkage void enter_from_user_mode(void);

arm64: entry: fix non-NMI user<->kernel transitions
When built with PROVE_LOCKING, NO_HZ_FULL, and CONTEXT_TRACKING_FORCE, the
kernel will WARN() at boot time that interrupts are enabled when we call
context_tracking_user_enter(), despite the DAIF flags indicating that
IRQs are masked.
The problem is that we're not tracking IRQ flag changes accurately, and
so lockdep believes interrupts are enabled when they are not (and
vice-versa). We can shuffle things so as to make this more accurate. For
kernel->user transitions there are a number of constraints we need to
consider:
1) When we call __context_tracking_user_enter() HW IRQs must be disabled
and lockdep must be up-to-date with this.
2) Userspace should be treated as having IRQs enabled from the PoV of
both lockdep and tracing.
3) As context_tracking_user_enter() stops RCU from watching, we cannot
use RCU after calling it.
4) IRQ flag tracing and lockdep have state that must be manipulated
before RCU is disabled.
... with similar constraints applying for user->kernel transitions, with
the ordering reversed.
The generic entry code has enter_from_user_mode() and
exit_to_user_mode() helpers to handle this. We can't use those directly,
so we add arm64 copies for now (without the instrumentation markers
which aren't used on arm64). These replace the existing user_exit() and
user_exit_irqoff() calls spread throughout handlers, and the exception
unmasking is left as-is.
Note that:
* The accounting for debug exceptions from userspace now happens in
el0_dbg() and ret_to_user(), so this is removed from
debug_exception_enter() and debug_exception_exit(). As
user_exit_irqoff() wakes RCU, the userspace-specific check is removed.
* The accounting for syscalls now happens in el0_svc(),
el0_svc_compat(), and ret_to_user(), so this is removed from
el0_svc_common(). This does not adversely affect the workaround for
erratum 1463225, as this does not depend on any of the state tracking.
* In ret_to_user() we mask interrupts with local_daif_mask(), and so we
need to inform lockdep and tracing. Here a trace_hardirqs_off() is
sufficient and safe as we have not yet exited kernel context and RCU
is usable.
* As PROVE_LOCKING selects TRACE_IRQFLAGS, the ifdeffery in entry.S only
needs to check for the latter.
* EL0 SError handling will be dealt with in a subsequent patch, as this
needs to be treated as an NMI.
Prior to this patch, booting an appropriately-configured kernel would
result in splats as below:
| DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())
| WARNING: CPU: 2 PID: 1 at kernel/locking/lockdep.c:5280 check_flags.part.54+0x1dc/0x1f0
| Modules linked in:
| CPU: 2 PID: 1 Comm: init Not tainted 5.10.0-rc3 #3
| Hardware name: linux,dummy-virt (DT)
| pstate: 804003c5 (Nzcv DAIF +PAN -UAO -TCO BTYPE=--)
| pc : check_flags.part.54+0x1dc/0x1f0
| lr : check_flags.part.54+0x1dc/0x1f0
| sp : ffff80001003bd80
| x29: ffff80001003bd80 x28: ffff66ce801e0000
| x27: 00000000ffffffff x26: 00000000000003c0
| x25: 0000000000000000 x24: ffffc31842527258
| x23: ffffc31842491368 x22: ffffc3184282d000
| x21: 0000000000000000 x20: 0000000000000001
| x19: ffffc318432ce000 x18: 0080000000000000
| x17: 0000000000000000 x16: ffffc31840f18a78
| x15: 0000000000000001 x14: ffffc3184285c810
| x13: 0000000000000001 x12: 0000000000000000
| x11: ffffc318415857a0 x10: ffffc318406614c0
| x9 : ffffc318415857a0 x8 : ffffc31841f1d000
| x7 : 647261685f706564 x6 : ffffc3183ff7c66c
| x5 : ffff66ce801e0000 x4 : 0000000000000000
| x3 : ffffc3183fe00000 x2 : ffffc31841500000
| x1 : e956dc24146b3500 x0 : 0000000000000000
| Call trace:
| check_flags.part.54+0x1dc/0x1f0
| lock_is_held_type+0x10c/0x188
| rcu_read_lock_sched_held+0x70/0x98
| __context_tracking_enter+0x310/0x350
| context_tracking_enter.part.3+0x5c/0xc8
| context_tracking_user_enter+0x6c/0x80
| finish_ret_to_user+0x2c/0x13c
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201130115950.22492-8-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
asmlinkage void exit_to_user_mode(void);
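
/*
 * Sketch of how these two helpers might order the steps described in the
 * commit message above; simplified, and not necessarily the kernel's exact
 * code:
 */
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);	/* lockdep: IRQs are masked */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();			/* RCU starts watching */
	trace_hardirqs_off_finish();		/* tracing, now that RCU is watching */
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();		/* tracing while RCU is still watching */
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();			/* RCU stops watching */
	lockdep_hardirqs_on(CALLER_ADDR0);	/* lockdep: userspace has IRQs enabled */
}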
arm64: entry: fix NMI {user, kernel}->kernel transitions
Exceptions which can be taken at (almost) any time are considered to be
NMIs. On arm64 that includes:
* SDEI events
* GICv3 Pseudo-NMIs
* Kernel stack overflows
* Unexpected/unhandled exceptions
... but currently debug exceptions (BRKs, breakpoints, watchpoints,
single-step) are not considered NMIs.
As these can be taken at any time, kernel features (lockdep, RCU,
ftrace) may not be in a consistent kernel state. For example, we may
take an NMI from the idle code or partway through an entry/exit path.
While nmi_enter() and nmi_exit() handle most of this state, notably they
don't save/restore the lockdep state across an NMI being taken and
handled. When interrupts are enabled and an NMI is taken, lockdep may
see interrupts become disabled within the NMI code, but not see
interrupts become enabled when returning from the NMI, leaving lockdep
believing interrupts are disabled when they are actually enabled.
The x86 code handles this in idtentry_{enter,exit}_nmi(), which will
shortly be moved to the generic entry code. As we can't use either yet,
we copy the x86 approach in arm64-specific helpers. All the NMI
entrypoints are marked as noinstr to prevent any instrumentation
handling code being invoked before the state has been corrected.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201130115950.22492-11-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
void arm64_enter_nmi(struct pt_regs *regs);
void arm64_exit_nmi(struct pt_regs *regs);
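
/*
 * Sketch of the NMI entry/exit pairing described above, mirroring the x86
 * idtentry_{enter,exit}_nmi() approach; simplified, and it assumes pt_regs
 * carries a lockdep_hardirqs field used only to save/restore lockdep's view
 * of the IRQ flags across the NMI:
 */
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	/* Save lockdep's IRQ-enabled view so arm64_exit_nmi() can restore it. */
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}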

void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
			struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
void do_sve_acc(unsigned int esr, struct pt_regs *regs);
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
void do_sysinstr(unsigned int esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
void do_serror(struct pt_regs *regs, unsigned int esr);
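
/*
 * Illustrative sketch (not the actual kernel code): a top-level EL0 sync
 * handler dispatches on the ESR_EL1 exception class to the do_*() handlers
 * above. The real handler also performs the user<->kernel entry accounting
 * and covers many more exception classes.
 */
static void el0t_64_sync_sketch(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		do_el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		do_mem_abort(read_sysreg(far_el1), esr, regs);
		break;
	case ESR_ELx_EC_SVE:
		do_sve_acc(esr, regs);
		break;
	default:
		/* remaining classes elided from this sketch */
		break;
	}
}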
#endif /* __ASM_EXCEPTION_H */