kvm: arm64: Remove __hyp_this_cpu_read
this_cpu_ptr is meant for use in kernel proper because it selects between
TPIDR_EL1/2 based on nVHE/VHE. __hyp_this_cpu_ptr was used in hyp to always
select TPIDR_EL2. Unify all users behind this_cpu_ptr and friends by
selecting the _EL2 register under __KVM_NVHE_HYPERVISOR__. VHE continues
selecting the register using alternatives.

Under CONFIG_DEBUG_PREEMPT, the kernel helpers perform a preemption check
which is omitted by the hyp helpers. Preserve that behavior for nVHE by
overriding the corresponding macros under __KVM_NVHE_HYPERVISOR__. Extend
the checks into VHE hyp code.

Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Andrew Scull <ascull@google.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200922204910.7265-5-dbrazdil@google.com
commit 717cf94adb (parent 3471ee06e3), committed by Marc Zyngier
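For orientation before the diff: every hyp call site below changes in the same
way. A minimal before/after sketch of the pattern (illustrative, lifted from
the hunks that follow):

    /* Before: hyp code used a home-grown helper that always read TPIDR_EL2. */
    host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;

    /*
     * After: the stock accessor works at EL2 too, since __my_cpu_offset now
     * resolves to TPIDR_EL2 under __KVM_NVHE_HYPERVISOR__. Note the extra '&':
     * this_cpu_ptr() takes a pointer to the per-CPU variable, whereas
     * __hyp_this_cpu_ptr() took the symbol itself.
     */
    host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;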
@@ -143,26 +143,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 		addr;							\
 	})
 
-/*
- * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
- * provided that sym is really a *symbol* and not a pointer obtained from
- * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
- * sparse quiet.
- */
-#define __hyp_this_cpu_ptr(sym)					\
-	({								\
-		void *__ptr;						\
-		__verify_pcpu_ptr(&sym);				\
-		__ptr = hyp_symbol_addr(sym);				\
-		__ptr += read_sysreg(tpidr_el2);			\
-		(typeof(sym) __kernel __force *)__ptr;			\
-	 })
-
-#define __hyp_this_cpu_read(sym)					\
-	({								\
-		*__hyp_this_cpu_ptr(sym);				\
-	 })
-
 #define __KVM_EXTABLE(from, to)					\
 	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
 	"	.align		3\n"					\
@@ -19,7 +19,16 @@ static inline void set_my_cpu_offset(unsigned long off)
 			:: "r" (off) : "memory");
 }
 
-static inline unsigned long __my_cpu_offset(void)
+static inline unsigned long __hyp_my_cpu_offset(void)
+{
+	/*
+	 * Non-VHE hyp code runs with preemption disabled. No need to hazard
+	 * the register access against barrier() as in __kern_my_cpu_offset.
+	 */
+	return read_sysreg(tpidr_el2);
+}
+
+static inline unsigned long __kern_my_cpu_offset(void)
 {
 	unsigned long off;
 
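The unchanged middle of the renamed function is elided between this hunk and
the next. For context, in this era of the tree it reads roughly as below (a
sketch from memory, not part of the patch); this is the ALTERNATIVE-based
TPIDR_EL1/TPIDR_EL2 selection that the commit message says VHE keeps using:

    static inline unsigned long __kern_my_cpu_offset(void)
    {
            unsigned long off;

            /*
             * We want to allow caching the value, so avoid using volatile and
             * instead use a fake stack read to hazard against barrier().
             */
            asm(ALTERNATIVE("mrs %0, tpidr_el1",
                            "mrs %0, tpidr_el2",
                            ARM64_HAS_VIRT_HOST_EXTN)
                : "=r" (off) :
                "Q" (*(const unsigned long *)current_stack_pointer));

            return off;
    }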
@@ -35,7 +44,12 @@ static inline unsigned long __my_cpu_offset(void)
 
 	return off;
 }
-#define __my_cpu_offset __my_cpu_offset()
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define __my_cpu_offset __hyp_my_cpu_offset()
+#else
+#define __my_cpu_offset __kern_my_cpu_offset()
+#endif
 
 #define PERCPU_RW_OPS(sz)						\
 static inline unsigned long __percpu_read_##sz(void *ptr)		\
@@ -227,4 +241,14 @@ PERCPU_RET_OP(add, add, ldadd)
 
 #include <asm-generic/percpu.h>
 
+/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
+#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
+#undef	this_cpu_ptr
+#define	this_cpu_ptr		raw_cpu_ptr
+#undef	__this_cpu_read
+#define	__this_cpu_read		raw_cpu_read
+#undef	__this_cpu_write
+#define	__this_cpu_write	raw_cpu_write
+#endif
+
 #endif /* __ASM_PERCPU_H */
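Why these overrides are needed (a hedged reading of the "avoid its
dependencies" comment): under CONFIG_DEBUG_PREEMPT the generic accessors wrap
their raw_* counterparts with a preemption check, approximately this shape
from include/linux/percpu-defs.h:

    /* Simplified: the debug variant checks preemption, then delegates. */
    #define __this_cpu_read(pcp)					\
    ({									\
    	__this_cpu_preempt_check("read");				\
    	raw_cpu_read(pcp);						\
    })

__this_cpu_preempt_check() ends up in check_preemption_disabled() in kernel
proper, a symbol the self-contained nVHE hyp object cannot link against.
Since nVHE hyp always runs with preemption disabled anyway, mapping the
accessors directly to raw_cpu_ptr()/raw_cpu_read()/raw_cpu_write() preserves
the old hyp behavior without pulling in that dependency.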
@@ -135,7 +135,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -154,7 +154,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -386,7 +386,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;
 
-	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	__ptrauth_save_key(ctxt, APIA);
 	__ptrauth_save_key(ctxt, APIB);
 	__ptrauth_save_key(ctxt, APDA);
@@ -495,7 +495,7 @@ static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
 	 * guest wants it disabled, so be it...
 	 */
 	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
+	    __this_cpu_read(arm64_ssbd_callback_required))
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
 #endif
 }
@@ -507,7 +507,7 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 	 * If the guest has disabled the workaround, bring it back on.
 	 */
 	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
+	    __this_cpu_read(arm64_ssbd_callback_required))
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
 #endif
 }
@@ -521,7 +521,7 @@ static inline void __kvm_unexpected_el2_exception(void)
 
 	entry = hyp_symbol_addr(__start___kvm_ex_table);
 	end = hyp_symbol_addr(__stop___kvm_ex_table);
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 
 	while (entry < end) {
 		addr = (unsigned long)&entry->insn + entry->insn;
@@ -175,7 +175,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	vcpu = kern_hyp_va(vcpu);
 
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 
@@ -108,7 +108,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;
 
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 
@@ -66,7 +66,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
 
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	__sysreg_save_user_state(host_ctxt);
 
 	/*
@@ -100,7 +100,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
 
-	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 	deactivate_traps_vhe_put();
 
 	__sysreg_save_el1_state(guest_ctxt);
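One closing observation on the VHE hunks above: VHE hyp code is linked into
kernel proper, so its converted call sites use the stock this_cpu_ptr() and
__this_cpu_read() unmodified, and under CONFIG_DEBUG_PREEMPT they now also
perform the preemption check; this is the "extend the checks into VHE hyp
code" part of the commit message.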