/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)

#ifndef __ASSEMBLY__

#include <linux/percpu.h>

#include <asm/cpufeature.h>
#include <asm/virt.h>

/* Watch out, ordering is important here. */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};
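
/*
 * The numerical ordering above is what makes the ordering important:
 * a mitigation state is only ever updated towards a numerically larger
 * (worse) value. A simplified sketch of such an update, modelled on the
 * cmpxchg-based helper in arch/arm64/kernel/spectre.c:
 *
 *	static void update_mitigation_state(enum mitigation_state *oldp,
 *					    enum mitigation_state new)
 *	{
 *		enum mitigation_state state;
 *
 *		do {
 *			state = READ_ONCE(*oldp);
 *			if (new <= state)
 *				break;
 *		} while (cmpxchg_relaxed(oldp, state, new) != state);
 *	}
 */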

struct task_struct;

/*
 * Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
 * we rely on having the direct vectors first.
 */
enum arm64_hyp_spectre_vector {
	/*
	 * Take exceptions directly to __kvm_hyp_vector. This must be
	 * 0 so that it is used by default when mitigations are not needed.
	 */
	HYP_VECTOR_DIRECT,

	/*
	 * Bounce via a slot in the hypervisor text mapping of
	 * __bp_harden_hyp_vecs, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_DIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page.
	 */
	HYP_VECTOR_INDIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_INDIRECT,
};
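
/*
 * Each vector slot is one complete set of EL2 vectors (SZ_2K), so a
 * slot index resolves to a vector base with simple arithmetic. A
 * minimal sketch, along the lines of the __kvm_vector_slot2addr()
 * helper in asm/kvm_asm.h:
 *
 *	vectors = base + slot * SZ_2K;
 */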

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	enum arm64_hyp_spectre_vector	slot;
	bp_hardening_cb_t		fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
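
/*
 * Each CPU installs its mitigation by pointing 'fn' at the relevant
 * hardening callback in its per-cpu copy of the structure above. A
 * sketch, modelled on the install path in arch/arm64/kernel/spectre.c
 * (the slot chosen depends on the vectors in use):
 *
 *	__this_cpu_write(bp_hardening_data.fn, cb);
 *	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
 */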

/*
 * This is called early during entry (e.g. from el0_ia()), before it is
 * safe to run instrumented code. It must therefore be __always_inline:
 * were the compiler to place it out of line, it could pick up ftrace or
 * KCOV instrumentation. For the same reason, the hardening callbacks
 * installed in bp_hardening_data must be marked noinstr.
 */
static __always_inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
		return;

	d = this_cpu_ptr(&bp_hardening_data);
	if (d->fn)
		d->fn();
}
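
/*
 * A simplified sketch of how this is reached from an EL0 exception
 * entry path, modelled on the instruction-abort handler in
 * arch/arm64/kernel/entry-common.c (details elided):
 *
 *	static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
 *	{
 *		unsigned long far = read_sysreg(far_el1);
 *
 *		if (!is_ttbr0_addr(far))
 *			arm64_apply_bp_hardening();
 *		...
 *	}
 */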

enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

enum mitigation_state arm64_get_meltdown_state(void);

/*
 * Spectre-BHB: speculation on some high-performance processors can be
 * influenced by the branch history. When taking an exception from
 * user-space, a sequence of branches or a firmware call
 * (ARCH_WORKAROUND_3) overwrites or invalidates the branch history.
 * The branch sequence is placed in the exception vectors (or, with
 * KPTI, in the kpti trampoline) before the first indirect branch. For
 * KVM, the existing branch-predictor-hardening vectors are reused:
 * where a "spectre" variant of those vectors is in use, the firmware
 * call also mitigates Spectre-BHB; otherwise the branch sequence is
 * added to the indirect vector.
 */
enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_SPECTRE_H */