2019-05-29 14:12:40 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2013-01-20 23:28:06 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
|
|
|
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __ARM_KVM_EMULATE_H__
|
|
|
|
#define __ARM_KVM_EMULATE_H__
|
|
|
|
|
|
|
|
#include <linux/kvm_host.h>
|
|
|
|
#include <asm/kvm_asm.h>
|
2013-01-20 23:43:58 +00:00
|
|
|
#include <asm/kvm_mmio.h>
|
2012-09-17 18:27:09 +00:00
|
|
|
#include <asm/kvm_arm.h>
|
2014-06-02 13:37:13 +00:00
|
|
|
#include <asm/cputype.h>
|
2013-01-20 23:28:06 +00:00
|
|
|
|
2017-10-29 02:18:09 +00:00
|
|
|
/*
 * arm64 compatibility macros: map the PSR_AA32_* names used by code
 * shared with arm64 onto the native 32-bit ARM PSR definitions.
 */
#define PSR_AA32_MODE_ABT	ABT_MODE
#define PSR_AA32_MODE_UND	UND_MODE
#define PSR_AA32_T_BIT		PSR_T_BIT
#define PSR_AA32_I_BIT		PSR_I_BIT
#define PSR_AA32_A_BIT		PSR_A_BIT
#define PSR_AA32_E_BIT		PSR_E_BIT
#define PSR_AA32_IT_MASK	PSR_IT_MASK
|
2017-10-29 02:18:09 +00:00
|
|
|
|
2012-10-03 10:17:02 +00:00
|
|
|
/* Resolve a (possibly banked) guest GP register for the vcpu's current mode. */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);

/*
 * arm64 compatibility wrapper: on 32-bit ARM every guest register is an
 * AArch32 register, so this is simply vcpu_reg().
 */
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
	return vcpu_reg(vcpu, reg_num);
}
|
|
|
|
|
2017-12-27 19:01:52 +00:00
|
|
|
/* Pointer to the SPSR banked for the vcpu's current mode. */
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);

/*
 * Read the current-mode SPSR.
 * NOTE: the "vpcu" spelling is a historical typo in the established name;
 * it is kept because callers use it.
 */
static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
{
	return *__vcpu_spsr(vcpu);
}

/* Write the current-mode SPSR. */
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	*__vcpu_spsr(vcpu) = v;
}
|
2013-01-20 23:28:06 +00:00
|
|
|
|
2015-12-04 12:03:11 +00:00
|
|
|
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
|
|
|
|
u8 reg_num)
|
|
|
|
{
|
|
|
|
return *vcpu_reg(vcpu, reg_num);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
|
|
|
|
unsigned long val)
|
|
|
|
{
|
|
|
|
*vcpu_reg(vcpu, reg_num) = val;
|
|
|
|
}
|
|
|
|
|
2016-09-06 08:28:43 +00:00
|
|
|
/* AArch32 emulation helpers implemented in C files shared with arm64. */
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
|
2017-10-29 02:18:09 +00:00
|
|
|
|
|
|
|
/*
 * Exception injection wrappers. The guest is always AArch32 here, so each
 * simply forwards to the 32-bit implementation.
 */
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	kvm_inject_undef32(vcpu);
}

static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_dabt32(vcpu, addr);
}

static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_pabt32(vcpu, addr);
}
|
2013-01-20 23:28:09 +00:00
|
|
|
|
2016-09-06 08:28:43 +00:00
|
|
|
/* Would the trapped (conditional) instruction actually have executed? */
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	return kvm_condition_valid32(vcpu);
}

/* Advance the guest PC past the trapped instruction. */
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	kvm_skip_instr32(vcpu, is_wide_instr);
}
|
|
|
|
|
2014-10-16 15:21:16 +00:00
|
|
|
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
vcpu->arch.hcr = HCR_GUEST_MASK;
|
|
|
|
}
|
|
|
|
|
2017-08-03 10:09:05 +00:00
|
|
|
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
|
2014-12-19 16:05:31 +00:00
|
|
|
{
|
2017-08-03 10:09:05 +00:00
|
|
|
return (unsigned long *)&vcpu->arch.hcr;
|
2014-12-19 16:05:31 +00:00
|
|
|
}
|
|
|
|
|
2019-11-07 16:04:12 +00:00
|
|
|
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
|
2018-06-21 09:43:59 +00:00
|
|
|
{
|
|
|
|
vcpu->arch.hcr &= ~HCR_TWE;
|
|
|
|
}
|
|
|
|
|
2019-11-07 16:04:12 +00:00
|
|
|
static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
|
2018-06-21 09:43:59 +00:00
|
|
|
{
|
|
|
|
vcpu->arch.hcr |= HCR_TWE;
|
|
|
|
}
|
|
|
|
|
2016-09-06 08:28:43 +00:00
|
|
|
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
|
2013-01-20 23:28:13 +00:00
|
|
|
{
|
2018-08-08 00:04:40 +00:00
|
|
|
return true;
|
2013-01-20 23:28:13 +00:00
|
|
|
}
|
|
|
|
|
2012-10-03 10:17:02 +00:00
|
|
|
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
|
2013-01-20 23:28:06 +00:00
|
|
|
{
|
2016-01-03 11:26:01 +00:00
|
|
|
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
|
2013-01-20 23:28:06 +00:00
|
|
|
}
|
|
|
|
|
2016-09-06 08:28:43 +00:00
|
|
|
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
|
2013-01-20 23:28:06 +00:00
|
|
|
{
|
2016-09-06 08:28:43 +00:00
|
|
|
return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
|
2013-01-20 23:28:06 +00:00
|
|
|
}
|
|
|
|
|
2013-01-20 23:28:13 +00:00
|
|
|
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
|
|
|
|
}
|
|
|
|
|
2013-01-20 23:28:06 +00:00
|
|
|
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2016-01-03 11:26:01 +00:00
|
|
|
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
|
2013-01-20 23:28:06 +00:00
|
|
|
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2016-01-03 11:26:01 +00:00
|
|
|
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
|
2018-01-23 15:11:14 +00:00
|
|
|
return cpsr_mode > USR_MODE;
|
2013-01-20 23:28:06 +00:00
|
|
|
}
|
|
|
|
|
2016-09-06 08:28:43 +00:00
|
|
|
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
|
2012-09-17 18:27:09 +00:00
|
|
|
{
|
|
|
|
return vcpu->arch.fault.hsr;
|
|
|
|
}
|
|
|
|
|
2016-09-06 08:28:43 +00:00
|
|
|
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
|
|
|
|
|
|
|
if (hsr & HSR_CV)
|
|
|
|
return (hsr & HSR_COND) >> HSR_COND_SHIFT;
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2012-09-17 18:27:09 +00:00
|
|
|
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return vcpu->arch.fault.hxfar;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
|
|
|
|
}
|
|
|
|
|
2012-09-18 10:06:23 +00:00
|
|
|
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
|
|
|
|
}
|
|
|
|
|
KVM: arm/arm64: Allow reporting non-ISV data aborts to userspace
For a long time, if a guest accessed memory outside of a memslot using
any of the load/store instructions in the architecture which doesn't
supply decoding information in the ESR_EL2 (the ISV bit is not set), the
kernel would print the following message and terminate the VM as a
result of returning -ENOSYS to userspace:
load/store instruction decoding not implemented
The reason behind this message is that KVM assumes that all accesses
outside a memslot is an MMIO access which should be handled by
userspace, and we originally expected to eventually implement some sort
of decoding of load/store instructions where the ISV bit was not set.
However, it turns out that many of the instructions which don't provide
decoding information on abort are not safe to use for MMIO accesses, and
the remaining few that would potentially make sense to use on MMIO
accesses, such as those with register writeback, are not used in
practice. It also turns out that fetching an instruction from guest
memory can be a pretty horrible affair, involving stopping all CPUs on
SMP systems, handling multiple corner cases of address translation in
software, and more. It doesn't appear likely that we'll ever implement
this in the kernel.
What is much more common is that a user has misconfigured his/her guest
and is actually not accessing an MMIO region, but just hitting some
random hole in the IPA space. In this scenario, the error message above
is almost misleading and has led to a great deal of confusion over the
years.
It is, nevertheless, ABI to userspace, and we therefore need to
introduce a new capability that userspace explicitly enables to change
behavior.
This patch introduces KVM_CAP_ARM_NISV_TO_USER (NISV meaning Non-ISV)
which does exactly that, and introduces a new exit reason to report the
event to userspace. User space can then emulate an exception to the
guest, restart the guest, suspend the guest, or take any other
appropriate action as per the policy of the running system.
Reported-by: Heinrich Schuchardt <xypron.glpk@gmx.de>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Alexander Graf <graf@amazon.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
2019-10-11 11:07:05 +00:00
|
|
|
static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
|
|
|
|
}
|
|
|
|
|
2012-09-18 10:12:26 +00:00
|
|
|
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
|
|
|
|
}
|
|
|
|
|
2012-09-18 10:23:02 +00:00
|
|
|
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
|
|
|
|
}
|
|
|
|
|
2012-09-18 10:28:57 +00:00
|
|
|
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
|
|
|
|
}
|
|
|
|
|
2012-09-18 10:37:28 +00:00
|
|
|
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
|
|
|
|
}
|
|
|
|
|
2016-01-29 15:01:28 +00:00
|
|
|
static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
|
|
|
|
}
|
|
|
|
|
2012-09-18 10:43:30 +00:00
|
|
|
/* Get Access Size from a data abort */
|
|
|
|
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
|
|
|
|
case 0:
|
|
|
|
return 1;
|
|
|
|
case 1:
|
|
|
|
return 2;
|
|
|
|
case 2:
|
|
|
|
return 4;
|
|
|
|
default:
|
|
|
|
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-18 11:07:06 +00:00
|
|
|
/* This one is not specific to Data Abort */
|
|
|
|
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
|
|
|
|
}
|
|
|
|
|
2012-09-18 13:09:58 +00:00
|
|
|
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
|
|
|
|
}
|
|
|
|
|
2012-10-15 09:33:38 +00:00
|
|
|
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
|
|
|
|
}
|
|
|
|
|
2012-09-18 13:14:35 +00:00
|
|
|
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
|
2014-09-26 10:29:34 +00:00
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
|
2012-09-18 13:14:35 +00:00
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
|
|
|
|
}
|
|
|
|
|
2017-07-18 12:37:41 +00:00
|
|
|
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2017-10-30 06:05:18 +00:00
|
|
|
switch (kvm_vcpu_trap_get_fault(vcpu)) {
|
2017-07-18 12:37:41 +00:00
|
|
|
case FSC_SEA:
|
|
|
|
case FSC_SEA_TTW0:
|
|
|
|
case FSC_SEA_TTW1:
|
|
|
|
case FSC_SEA_TTW2:
|
|
|
|
case FSC_SEA_TTW3:
|
|
|
|
case FSC_SECC:
|
|
|
|
case FSC_SECC_TTW0:
|
|
|
|
case FSC_SECC_TTW1:
|
|
|
|
case FSC_SECC_TTW2:
|
|
|
|
case FSC_SECC_TTW3:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-01 20:29:58 +00:00
|
|
|
/*
 * Was the fault a write? Instruction aborts are never writes; for data
 * aborts this is the WnR bit.
 */
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	return !kvm_vcpu_trap_is_iabt(vcpu) && kvm_vcpu_dabt_iswrite(vcpu);
}
|
|
|
|
|
2013-02-21 19:26:10 +00:00
|
|
|
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
|
|
|
|
}
|
|
|
|
|
2014-06-02 13:37:13 +00:00
|
|
|
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
|
2013-10-18 17:19:03 +00:00
|
|
|
{
|
2016-01-03 11:26:01 +00:00
|
|
|
return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
|
2013-10-18 17:19:03 +00:00
|
|
|
}
|
|
|
|
|
2019-05-03 14:27:49 +00:00
|
|
|
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
|
|
|
|
bool flag)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2013-11-05 14:12:15 +00:00
|
|
|
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
|
|
|
|
}
|
|
|
|
|
2013-02-12 12:40:22 +00:00
|
|
|
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
|
|
|
|
unsigned long data,
|
|
|
|
unsigned int len)
|
|
|
|
{
|
|
|
|
if (kvm_vcpu_is_be(vcpu)) {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return be16_to_cpu(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return be32_to_cpu(data);
|
|
|
|
}
|
2014-06-12 16:30:05 +00:00
|
|
|
} else {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return le16_to_cpu(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return le32_to_cpu(data);
|
|
|
|
}
|
2013-02-12 12:40:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
|
|
|
|
unsigned long data,
|
|
|
|
unsigned int len)
|
|
|
|
{
|
|
|
|
if (kvm_vcpu_is_be(vcpu)) {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return cpu_to_be16(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return cpu_to_be32(data);
|
|
|
|
}
|
2014-06-12 16:30:05 +00:00
|
|
|
} else {
|
|
|
|
switch (len) {
|
|
|
|
case 1:
|
|
|
|
return data & 0xff;
|
|
|
|
case 2:
|
|
|
|
return cpu_to_le16(data & 0xffff);
|
|
|
|
default:
|
|
|
|
return cpu_to_le32(data);
|
|
|
|
}
|
2013-02-12 12:40:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
KVM: arm/arm64: Context-switch ptrauth registers
When pointer authentication is supported, a guest may wish to use it.
This patch adds the necessary KVM infrastructure for this to work, with
a semi-lazy context switch of the pointer auth state.
Pointer authentication feature is only enabled when VHE is built
in the kernel and present in the CPU implementation so only VHE code
paths are modified.
When we schedule a vcpu, we disable guest usage of pointer
authentication instructions and accesses to the keys. While these are
disabled, we avoid context-switching the keys. When we trap the guest
trying to use pointer authentication functionality, we change to eagerly
context-switching the keys, and enable the feature. The next time the
vcpu is scheduled out/in, we start again. However the host key save is
optimized and implemented inside ptrauth instruction/register access
trap.
Pointer authentication consists of address authentication and generic
authentication, and CPUs in a system might have varied support for
either. Where support for either feature is not uniform, it is hidden
from guests via ID register emulation, as a result of the cpufeature
framework in the host.
Unfortunately, address authentication and generic authentication cannot
be trapped separately, as the architecture provides a single EL2 trap
covering both. If we wish to expose one without the other, we cannot
prevent a (badly-written) guest from intermittently using a feature
which is not uniformly supported (when scheduled on a physical CPU which
supports the relevant feature). Hence, this patch expects both types of
authentication to be present in a cpu.
This switch of key is done from guest enter/exit assembly as preparation
for the upcoming in-kernel pointer authentication support. Hence, these
key switching routines are not implemented in C code as they may cause
pointer authentication key signing error in some situations.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
[Only VHE, key switch in full assembly, vcpu_has_ptrauth checks
, save host key in ptrauth exception trap]
Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Cc: Christoffer Dall <christoffer.dall@arm.com>
Cc: kvmarm@lists.cs.columbia.edu
[maz: various fixups]
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
2019-04-23 04:42:35 +00:00
|
|
|
/* Pointer authentication does not exist on 32-bit ARM: nothing to set up. */
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
|
|
|
|
|
2013-01-20 23:28:06 +00:00
|
|
|
#endif /* __ARM_KVM_EMULATE_H__ */
|