KVM/arm64 fixes for 6.1, take #3

- Fix the pKVM stage-1 walker erroneously using the stage-2 accessor
 
 - Correctly convert vcpu->kvm to a hyp pointer when generating
   an exception in a nVHE+MTE configuration
 
 - Check that KVM_CAP_DIRTY_LOG_* are valid before enabling them
 
 - Fix SMPRI_EL1/TPIDR2_EL0 trapping on VHE
 
 - Document the boot requirements for FGT when entering the kernel
   at EL1
 -----BEGIN PGP SIGNATURE-----
 
 iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAmNiMJIPHG1hekBrZXJu
 ZWwub3JnAAoJECPQ0LrRPXpDQDcQAKy3Z3U6SkXmnbvGxkVNDJgVk0njdEjfqd5j
 EMp9CYI5gderScBFP6Ug7do3tzSbLfvuIl6zRkSpUhNEcmCDB2SA+lkyEClqfhYh
 CrWO5jF7JEubzJOVYEGmAZ+fbbgkhqEIL52IqHzUPTSVf690UEBFzs/1yKujgfuA
 8razVFG6gLuRTbbPWRvHtJ1/AcM7rLlqMgae1UwpwZJlsxIBwjkQZHWG+owhJBqz
 u+eoY0pUwkpQqfYDyMfOwjFGDrlOpWou3wttbk/P2GCsliTwO/A7RlQojm0tFc9v
 colWYvTzCpDqWSBA7sDsnA933DRxJookLxA3LZpt7qvmChveNd3SJJyOQVLkj4sX
 pXbl/1Ed1xYANZBNx4fx0nsIFkyeEUZ27QRtCrLMmSohGSkmJbKcx4mg2Xb/iipp
 ZF23r8M0BWghEN4lDjoGRMvWx4LBRnfahtCjz9QGznEeU/4n2Dk4ncytOP8RQBc4
 MBsf7ZAGxLu18pEXcDzZpSW6wwX8lQAcFpMuYSbDCheFre9Ah44kzkS6D9yZYWLh
 b49jA5V+/fsL8wn+jgzA1bl7VbYXa/GOx83uCn+Udp6ohj8gFrFsoXgwNMW2GN1r
 I+Xsgxv0cJ5fDS/kH1zkE1op1wamsU93BLKhNcqHpeKeIbmWD2EMwcyhBXPzvtci
 LKHVpuIX
 =XaXS
 -----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-6.1-3' into kvm-arm64/dirty-ring

KVM/arm64 fixes for 6.1, take #3

- Fix the pKVM stage-1 walker erroneously using the stage-2 accessor

- Correctly convert vcpu->kvm to a hyp pointer when generating
  an exception in a nVHE+MTE configuration

- Check that KVM_CAP_DIRTY_LOG_* are valid before enabling them

- Fix SMPRI_EL1/TPIDR2_EL0 trapping on VHE

- Document the boot requirements for FGT when entering the kernel
  at EL1

Signed-off-by: Marc Zyngier <maz@kernel.org>
Marc Zyngier 2022-11-10 13:11:38 +00:00
commit 590925a178
7 changed files with 34 additions and 36 deletions

---- (file name not shown) ----

@@ -340,6 +340,14 @@ Before jumping into the kernel, the following conditions must be met:
     - SMCR_EL2.LEN must be initialised to the same value for all CPUs the
       kernel will execute on.
 
+    - HWFGRTR_EL2.nTPIDR2_EL0 (bit 55) must be initialised to 0b01.
+
+    - HWFGWTR_EL2.nTPIDR2_EL0 (bit 55) must be initialised to 0b01.
+
+    - HWFGRTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01.
+
+    - HWFGWTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01.
+
   For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64)
 
   - If EL3 is present:
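These are bits in the EL2 fine-grained trap registers; setting the negative ("n") bits to 1 leaves the EL1 kernel's accesses to TPIDR2_EL0 and SMPRI_EL1 untrapped. Purely as an illustration, an EL2 boot stub would need to do something equivalent to the sketch below before dropping to EL1 (the helper name is hypothetical, real boot code does this in assembly, and the kernel's read_sysreg_s()/write_sysreg_s() accessors and HFGxTR_EL2_* masks are borrowed for readability):

/*
 * Illustrative sketch only: hypothetical helper, not code from this
 * series.  Setting the "n" (negative trap) bits to 1 means TPIDR2_EL0
 * and SMPRI_EL1 accesses are not trapped to EL2.
 */
static void el2_disable_sme_fgt(void)
{
	u64 bits = HFGxTR_EL2_nTPIDR2_EL0_MASK | HFGxTR_EL2_nSMPRI_EL1_MASK;

	write_sysreg_s(read_sysreg_s(SYS_HFGRTR_EL2) | bits, SYS_HFGRTR_EL2);
	write_sysreg_s(read_sysreg_s(SYS_HFGWTR_EL2) | bits, SYS_HFGWTR_EL2);
}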

---- (file name not shown) ----

@@ -13,6 +13,7 @@
 #include <hyp/adjust_pc.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
 
 #if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
 #error Hypervisor code only!
@@ -115,7 +116,7 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
 	new |= (old & PSR_C_BIT);
 	new |= (old & PSR_V_BIT);
 
-	if (kvm_has_mte(vcpu->kvm))
+	if (kvm_has_mte(kern_hyp_va(vcpu->kvm)))
 		new |= PSR_TCO_BIT;
 
 	new |= (old & PSR_DIT_BIT);
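The added kern_hyp_va() matters because, under nVHE, the hypervisor runs at EL2 with its own address space: vcpu->kvm holds an EL1 kernel VA which must be translated before EL2 code dereferences it, and the conversion is expected to be a no-op on VHE. The helper comes from the newly included <asm/kvm_mmu.h>. A minimal sketch of the pattern, with a hypothetical function name:

/*
 * Hypothetical helper, not part of the patch: shows the nVHE rule that
 * host-written pointers (EL1 kernel VAs) must go through kern_hyp_va()
 * before the EL2 hypervisor dereferences them.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>

static bool vcpu_guest_has_mte(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);	/* EL1 VA -> hyp VA */

	return kvm_has_mte(kvm);
}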

---- (file name not shown) ----

@@ -87,6 +87,17 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+
+	if (cpus_have_final_cap(ARM64_SME)) {
+		sysreg_clear_set_s(SYS_HFGRTR_EL2,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK,
+				   0);
+		sysreg_clear_set_s(SYS_HFGWTR_EL2,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK,
+				   0);
+	}
 }
 
 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -96,6 +107,15 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 	write_sysreg(0, hstr_el2);
 	if (kvm_arm_support_pmu_v3())
 		write_sysreg(0, pmuserenr_el0);
+
+	if (cpus_have_final_cap(ARM64_SME)) {
+		sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK);
+		sysreg_clear_set_s(SYS_HFGWTR_EL2, 0,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK);
+	}
 }
 
 static inline void ___activate_traps(struct kvm_vcpu *vcpu)
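Moving the logic into the common helpers also switches it to sysreg_clear_set_s(), a read-modify-write that skips the register write when nothing changes. A simplified sketch of the macro (not the exact text from <asm/sysreg.h>):

/* Simplified sketch of sysreg_clear_set_s(reg, clear, set): a
 * read-modify-write that only writes the register back when the
 * resulting value actually differs. */
#define sysreg_clear_set_s(sysreg, clear, set) do {			\
	u64 __scs_val = read_sysreg_s(sysreg);				\
	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
	if (__scs_new != __scs_val)					\
		write_sysreg_s(__scs_new, sysreg);			\
} while (0)

So activation clears the nSMPRI_EL1/nTPIDR2_EL0 bits, making guest accesses to SMPRI_EL1 and TPIDR2_EL0 trap to the hypervisor, and deactivation sets them again so the host runs untrapped, on both the nVHE and VHE paths.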

---- (file name not shown) ----

@@ -516,7 +516,7 @@ static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
 	if (!kvm_pte_valid(pte))
 		return PKVM_NOPAGE;
 
-	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
+	return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
 }
 
 static int __hyp_check_page_state_range(u64 addr, u64 size,

---- (file name not shown) ----

@@ -55,18 +55,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(val, cptr_el2);
 	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
 
-	if (cpus_have_final_cap(ARM64_SME)) {
-		val = read_sysreg_s(SYS_HFGRTR_EL2);
-		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
-			 HFGxTR_EL2_nSMPRI_EL1_MASK);
-		write_sysreg_s(val, SYS_HFGRTR_EL2);
-
-		val = read_sysreg_s(SYS_HFGWTR_EL2);
-		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
-			 HFGxTR_EL2_nSMPRI_EL1_MASK);
-		write_sysreg_s(val, SYS_HFGWTR_EL2);
-	}
-
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
@@ -110,20 +98,6 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
 	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
-	if (cpus_have_final_cap(ARM64_SME)) {
-		u64 val;
-
-		val = read_sysreg_s(SYS_HFGRTR_EL2);
-		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
-		       HFGxTR_EL2_nSMPRI_EL1_MASK;
-		write_sysreg_s(val, SYS_HFGRTR_EL2);
-
-		val = read_sysreg_s(SYS_HFGWTR_EL2);
-		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
-		       HFGxTR_EL2_nSMPRI_EL1_MASK;
-		write_sysreg_s(val, SYS_HFGWTR_EL2);
-	}
-
 	cptr = CPTR_EL2_DEFAULT;
 	if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
 		cptr |= CPTR_EL2_TZ;

---- (file name not shown) ----

@@ -63,10 +63,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 		__activate_traps_fpsimd32(vcpu);
 	}
 
-	if (cpus_have_final_cap(ARM64_SME))
-		write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
-			     sctlr_el2);
-
 	write_sysreg(val, cpacr_el1);
 
 	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
@@ -88,10 +84,6 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	 */
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
-	if (cpus_have_final_cap(ARM64_SME))
-		write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
-			     sctlr_el2);
-
 	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
 
 	if (!arm64_kernel_unmapped_at_el0())

---- (file name not shown) ----

@@ -4585,6 +4585,9 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
 	}
 	case KVM_CAP_DIRTY_LOG_RING:
 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
+		if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
+			return -EINVAL;
+
 		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
 	default:
 		return kvm_vm_ioctl_enable_cap(kvm, cap);
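From userspace this is the KVM_ENABLE_CAP path on the VM fd: with the check in place, asking for a dirty-ring flavour that KVM_CHECK_EXTENSION does not report fails with EINVAL instead of configuring an unsupported ring. A hypothetical VMM snippet (helper name and error handling are illustrative):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical VMM helper: enable the acquire/release dirty ring on a
 * VM fd obtained from KVM_CREATE_VM.  'bytes' is the per-vCPU ring
 * size in bytes and must be a power of two. */
static int enable_dirty_ring(int vm_fd, unsigned long bytes)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL,
		.args = { bytes },
	};

	/* After this fix the ioctl fails with EINVAL when the capability
	 * is not reported by KVM_CHECK_EXTENSION for this VM. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}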