/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.idmap.text, "ax"

	.align	11
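
/*
 * This is the EL2 exception vector table used while KVM initialises.
 * VBAR_EL2 requires 2kB alignment, hence the ".align 11" (2^11 bytes)
 * above. Init is entered via an HVC issued from 64-bit EL1, so the
 * "Synchronous 64-bit EL1" slot is the only valid entry point; every
 * other slot parks the CPU via __invalid.
 */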
SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1
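
/* An unexpected exception during EL2 setup: spin so the state can be examined */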
__invalid:
	b	.

	/*
	 * x0: HYP pgd
	 * x1: HYP stack
	 * x2: HYP vectors
	 * x3: per-CPU offset
	 */
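	/*
	 * For illustration only (a sketch, not part of this file): the
	 * kernel-side caller, cpu_init_hyp_mode(), gathers these arguments
	 * roughly as follows before issuing the init HVC:
	 *
	 *	pgd_ptr       = kvm_mmu_get_httbr();
	 *	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
	 *	vector_ptr    = (unsigned long)kvm_get_hyp_vector();
	 *	tpidr_el2     = this CPU's per-CPU offset;
	 */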
__do_hyp_init:
	/*
	 * Check for a stub HVC call: stub function IDs are all below
	 * HVC_STUB_HCALL_NR, so they can never be confused with a pgd
	 * address.
	 */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

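	/*
	 * Load TTBR0_EL2 with the HYP pgd. When FEAT_TTCNP is present,
	 * also set the CNP bit, telling the CPU that these translation
	 * tables are common to all CPUs setting it.
	 */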
	phys_to_ttbr x4, x0
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

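	/*
	 * Derive TCR_EL2 from TCR_EL1: keep only the fields the two
	 * registers share (T0SZ, TG0, SH0, ORGN0, IRGN0), then set the
	 * bits that are RES1 at EL2.
	 */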
	mrs	x4, tcr_el1
	mov_q	x5, TCR_EL2_MASK
	and	x4, x4, x5
	mov	x5, #TCR_EL2_RES1
	orr	x4, x4, x5

	/*
	 * The ID map may be configured to use an extended virtual address
	 * range. This is only the case if system RAM is out of range for the
	 * currently configured page size and VA_BITS, in which case we will
	 * also need the extended virtual range for the HYP ID map, or we won't
	 * be able to enable the EL2 MMU.
	 *
	 * However, at EL2, there is only one TTBR register, and we can't switch
	 * between translation tables *and* update TCR_EL2.T0SZ at the same
	 * time. Bottom line: we need to use the extended range with *both* our
	 * translation tables.
	 *
	 * So use the same T0SZ value we use for the ID map.
	 */
	ldr_l	x5, idmap_t0sz
	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	/*
	 * Set the PS bits in TCR_EL2: the physical address size is
	 * derived from ID_AA64MMFR0_EL1.PARange, capped to what the
	 * kernel supports.
	 */
	tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6

	msr	tcr_el2, x4

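	/* Reuse the kernel's EL1 memory attribute encodings at EL2 */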
	mrs	x4, mair_el1
	msr	mair_el2, x4
	isb

	/* Invalidate any stale EL2 TLB entries left by the bootloader */
	tlbi	alle2
	dsb	sy

	/*
	 * Preserve all the RES1 bits while setting the default flags,
	 * as well as the EE bit on BE. Drop the A flag since the compiler
	 * is allowed to generate unaligned accesses.
	 */
	mov_q	x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
	msr	sctlr_el2, x4
	isb
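
	/*
	 * The EL2 MMU is now on. Execution continues because this code
	 * lives in .hyp.idmap.text, which is identity-mapped in the HYP
	 * tables; from here on, pointers must be HYP VAs.
	 */
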
	/* Set the stack and new vectors */
	kern_hyp_va	x1
	mov	sp, x1
	msr	vbar_el2, x2

	/* Set tpidr_el2 for use by HYP */
	msr	tpidr_el2, x3

	/* Hello, World! */
	eret
SYM_CODE_END(__kvm_hyp_init)
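
/*
 * __kvm_handle_stub_hvc - handle the stub hypercalls that remain
 * available once KVM owns EL2:
 *
 * HVC_SOFT_RESTART:  disable the EL2 MMU, install the stub vectors
 *                    and branch to the address in x1, staying at EL2.
 * HVC_RESET_VECTORS: disable the EL2 MMU, install the stub vectors
 *                    and return to the caller.
 *
 * Anything else returns HVC_STUB_ERR in x0.
 */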
SYM_CODE_START(__kvm_handle_stub_hvc)
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

reset:
	/*
	 * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
	 * case we are coming in via HVC_SOFT_RESTART.
	 */
	mrs	x5, sctlr_el2
	mov_q	x6, SCTLR_ELx_FLAGS
	bic	x5, x5, x6		// Clear SCTLR_ELx.M and other flags
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb
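
	/*
	 * With the MMU off, we fetch from the identity-mapped idmap text
	 * again, so PC-relative addresses resolve to physical ones.
	 */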
	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	mov	x0, xzr
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret
SYM_CODE_END(__kvm_handle_stub_hvc)

	.popsection