/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>
#include <asm/spectre.h>
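
/*
 * Spill/reload helpers for the caller-saved GPRs around calls into C
 * code from the EL2 vectors. x0/x1 are pushed by the vector entry
 * preamble and x29/x30 are saved separately at the call sites, so
 * only x2-x17 are handled here.
 */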

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16	// pushed by the vector entry preamble
.endm

	.text

el1_sync:				// Guest trapped into EL2
	mrs	x0, esr_el2
	/*
	 * ESR_ELx[63:32] have been RES0 since ARMv8.0, but the upper
	 * bits are gaining meaning (e.g. ISS2 in ESR_ELx[36:32] for
	 * FEAT_LS64), so extract only the EC field rather than lsr-ing
	 * the whole register down.
	 */
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne	// EQ iff EC is HVC64 or HVC32
	b.ne	el1_trap
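
	/*
	 * From here, w1 is tested by XOR-folding: after each eor below,
	 * w1 is zero iff the guest's x0 held the function ID named on
	 * that line (x ^ A ^ (A ^ B) == x ^ B).
	 */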

	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

	/*
	 * ARM_SMCCC_ARCH_WORKAROUND_3 handling: on vectors that already
	 * run the Spectre-v2 firmware call, that call also suffices to
	 * mitigate Spectre-BHB, so this too is a fast path.
	 */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap

wa_epilogue:
	mov	x0, xzr			// SMCCC_RET_SUCCESS
	add	sp, sp, #16		// drop the x0/x1 saved at vector entry
	eret
	sb				// gate straight-line speculation past eret

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
el1_fiq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit
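
/*
 * Exceptions taken from EL2 itself: anything other than an illegal
 * exception return is unexpected and reported via
 * kvm_unexpected_el2_exception().
 */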

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f			// SPSR_EL2.IL is bit 20

	save_caller_saved_regs_vect
	stp	x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp	x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit

el2_error:
	save_caller_saved_regs_vect
	stp	x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp	x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

.macro invalid_vector	label, target = __guest_exit_panic
	.align	2
SYM_CODE_START_LOCAL(\label)
	b	\target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid

	.ltorg

	.align 11

.macro check_preamble_length start, end
	/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm
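
/*
 * Each 128-byte vector slot starts with a fixed-length preamble
 * (esb or nop, then the x0/x1 push) that code generated by
 * kvm_patch_vector_branch() branches over; check_preamble_length
 * enforces this invariant.
 */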

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	nop			// matches valid_vect's esb, keeping the preamble length fixed
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	valid_vect	el1_fiq			// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	valid_vect	el1_fiq			// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
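
/*
 * Call into firmware for the Spectre-v2 mitigation (WORKAROUND_1,
 * patched to WORKAROUND_3 where the firmware supports it), preserving
 * the guest's x0-x3 across the SMC.
 */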

.macro spectrev2_smccc_wa1_smc
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
alternative_cb spectre_bhb_patch_wa3
	/* Patched to mov WA3 when supported */
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
alternative_cb_end
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)	// leave the guest's x0/x1 on the stack,
					// as if pushed by the vector preamble
.endm
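
/*
 * One hardened vector slot: run the selected Spectre-v2/BHB
 * mitigation, then fall through (or take the patched indirect branch
 * for Spectre-v3a) into __kvm_hyp_vector, skipping its preamble.
 */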
.macro hyp_ventry	indirect, spectrev2
	.align	7
1:	esb
	.if \spectrev2 != 0
	spectrev2_smccc_wa1_smc
	.else
	stp	x0, x1, [sp, #-16]!
	mitigate_spectre_bhb_loop	x0
	mitigate_spectre_bhb_clear_insn
	.endif
	.if \indirect != 0
	alternative_cb	kvm_patch_vector_branch
	/*
	 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
	 *
	 * movz	x0, #(addr & 0xffff)
	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
	 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
	 * br	x0
	 *
	 * Where:
	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
	 * See kvm_patch_vector_branch for details.
	 */
	nop
	nop
	nop
	nop
	alternative_cb_end
	.endif
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
.endm
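
/*
 * 16 slots of 128 bytes each (.align 7) fill exactly the 2K vector
 * table asserted by the .org below.
 */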
.macro generate_vectors	indirect, spectrev2
0:
	.rept 16
	hyp_ventry	\indirect, \spectrev2
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
	generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
	generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)