// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
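
/*
 * Host state stashed while the guest's stage-2 context is loaded for a
 * TLB operation; only TCR_EL1 needs saving, for the speculative-AT
 * workaround below.
 */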
struct tlb_inv_context {
	u64 tcr;
};
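
/*
 * Install the guest's stage-2 context (VTTBR/VMID) so that the TLB
 * maintenance that follows targets the guest's translations, saving any
 * host state that must be restored afterwards.
 */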
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
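	/* Switch back to the host's VMID */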
	write_sysreg(0, vttbr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();

		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}
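
/*
 * Invalidate the stage-2 mapping of a single IPA for the VMID associated
 * with this stage-2 mmu, broadcast to all CPUs.
 */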
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

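	/* Ensure prior page-table updates have become visible before invalidating */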
	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
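	/* The TLBI argument encodes the IPA shifted right by 12 bits */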
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}
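
/*
 * Invalidate all TLB entries for the VMID associated with this stage-2
 * mmu, on all CPUs.
 */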
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

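	/* Invalidate all Stage-1 and Stage-2 entries for this VMID, inner shareable */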
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
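
/*
 * Invalidate this CPU's TLB entries for the VMID associated with this
 * stage-2 mmu; the invalidation is not broadcast to other CPUs.
 */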
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

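	/* Local invalidation only, so a non-shareable barrier is sufficient */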
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}
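
/*
 * Invalidate TLB entries for all VMIDs on all CPUs, typically on VMID
 * generation rollover.
 */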
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
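	/* Invalidate all EL1&0 regime entries, for all VMIDs, inner shareable */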
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}