KVM/ARM fixes for v4.5-rc2

A few random fixes, mostly coming from the PMU work by Shannon:

- fix for injecting faults coming from the guest's userspace
- cleanup for our CPTR_EL2 accessors (reserved bits)
- fix for a bug impacting perf (user/kernel discrimination)
- fix for a 32bit sysreg handling bug

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJWqehPAAoJECPQ0LrRPXpDn6gP/2lrJ9lV5I3MxLzUytmRY8EM
Xl8WnNEQJ0e7oEdb1l6k4DR8D/HefzXpp/YWHY1WdDZSej0b2egro1xsFWdgaOr9
NVGJnoQBlCFqSIf2szml4ftpHXZZ/kMF/EvhtzEL6cpUdqeA/tkS6HoCMQknhCbx
3zOYnNKCGQUkFhTKJUSXB6NcZ/950uqkQxAdCPNUTGg1YzkNfbcgTewqKsmb25Cv
/sOUFmrq2AlnWkdH+QWP0BtNFUX9saOSXvxrABT6nfiXSpUeF6Rprcgi9gdoNhAD
mfE5IFw0dOEo2XThZTchKu3FBSMAkDadvC9yWFr88dr62E6EKFPzY3vHLCA8QoT/
zk5beGSjyWGe7FZZJ4CKdO4EWBZr/WSlSVzOfG4ZBVPUoh2AZcUEhzzrzTezzocO
71/5ZVpQ6O8+Pxwyy85Vd2drf7OZLagGNydNx46RHXrRxl+q0c5vFTVh4Txbd4YU
XNsd+kA62/OYyPHbtVzTzAPPKG7aM8hLzdy8dkTgvuDzWHmxFWhD/HgiMHfFrQqs
WCafvBhTc4375dvwYOupxaU2ncHKvt/zQJtBOw6bEwAIUa5c1IkIUr0i8XgRq6lr
x/YvhFIwiVyXVnrDt3ZSIx79Oajf541uJg7vLFyPBQkcnQlJ6T7oy7qJlqhM0567
Sr6G0/YXa1ccIfmKyeh4
=36kx
-----END PGP SIGNATURE-----

Merge tag 'kvm-arm-for-4.5-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
commit afc6074381
@@ -182,6 +182,7 @@
 #define CPTR_EL2_TCPAC		(1 << 31)
 #define CPTR_EL2_TTA		(1 << 20)
 #define CPTR_EL2_TFP		(1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT	0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA		(1 << 11)
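
The one line added here, CPTR_EL2_DEFAULT, is what the later hyp switch hunks build on. A small stand-alone program (illustration only, not kernel code; reading 0x000033ff as covering bits [9:0] and [13:12], the bits that must stay set in this view of CPTR_EL2, is my interpretation rather than something the hunk states) decodes the constant:

#include <stdio.h>

#define CPTR_EL2_DEFAULT 0x000033ffu

int main(void)
{
	unsigned int res1_low  = (1u << 10) - 1;		/* bits [9:0]   */
	unsigned int res1_high = (1u << 12) | (1u << 13);	/* bits [13:12] */

	/* Prints 0x000033ff and confirms the decomposition matches */
	printf("0x%08x %s\n", res1_low | res1_high,
	       (res1_low | res1_high) == CPTR_EL2_DEFAULT ?
	       "== CPTR_EL2_DEFAULT" : "mismatch");
	return 0;
}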
@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+	u32 mode;
 
-	if (vcpu_mode_is_32bit(vcpu))
+	if (vcpu_mode_is_32bit(vcpu)) {
+		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
 		return mode > COMPAT_PSR_MODE_USR;
+	}
+
+	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
 	return mode != PSR_MODE_EL0t;
 }
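
This is the perf user/kernel discrimination fix: vcpu_mode_priv() is what KVM's perf guest callbacks use to decide whether a sample came from guest userspace or the guest kernel. The old code masked the CPSR of an AArch32 guest with the 4-bit AArch64 mode mask, so M[4] was lost and, as far as I can tell, every AArch32 mode compared as unprivileged. A stand-alone sketch of the corrected check; the PSR_* / COMPAT_PSR_* values are copied from the arm64 uapi headers so the example compiles on its own, and the PSR_MODE32_BIT test stands in for vcpu_mode_is_32bit():

#include <stdbool.h>
#include <stdio.h>

#define PSR_MODE_EL0t		0x00000000
#define PSR_MODE_MASK		0x0000000f	/* AArch64 M[3:0] */
#define PSR_MODE32_BIT		0x00000010	/* set when the vcpu is in AArch32 state */
#define COMPAT_PSR_MODE_USR	0x00000010
#define COMPAT_PSR_MODE_MASK	0x0000001f	/* AArch32 M[4:0] */

static bool mode_is_priv(unsigned int cpsr)
{
	unsigned int mode;

	if (cpsr & PSR_MODE32_BIT) {			/* AArch32 guest */
		mode = cpsr & COMPAT_PSR_MODE_MASK;	/* full 5-bit mode field */
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = cpsr & PSR_MODE_MASK;			/* AArch64 guest */
	return mode != PSR_MODE_EL0t;
}

int main(void)
{
	/* AArch32 USR (0x10) is unprivileged, AArch32 SYS (0x1f) is privileged,
	 * AArch64 EL1h (0x5) is privileged: prints "0 1 1" */
	printf("%d %d %d\n", mode_is_priv(0x10), mode_is_priv(0x1f), mode_is_priv(0x5));
	return 0;
}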
@@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
-	write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+
+	val = CPTR_EL2_DEFAULT;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+	write_sysreg(val, cptr_el2);
+
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
@@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(HCR_RW, hcr_el2);
 	write_sysreg(0, hstr_el2);
 	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-	write_sysreg(0, cptr_el2);
+	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
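
Taken together with the CPTR_EL2_DEFAULT definition above, these two hunks make both directions of the world switch start from the same baseline: traps are OR'd on top of the default on guest entry, and the default, rather than 0, is restored on exit. A stand-alone sketch of the pattern; write_sysreg() here is a local stub over a plain variable, not the kernel's inline-asm accessor, and the TFP bit position (10) is assumed rather than shown in these hunks:

#include <stdio.h>

#define CPTR_EL2_TTA		(1u << 20)	/* trap trace accesses */
#define CPTR_EL2_TFP		(1u << 10)	/* trap FP/SIMD (assumed shift) */
#define CPTR_EL2_DEFAULT	0x000033ffu

static unsigned int cptr_el2;			/* fake register */

static void write_sysreg(unsigned int val)	/* stub, not the kernel macro */
{
	cptr_el2 = val;
}

static void activate_traps(void)
{
	unsigned int val = CPTR_EL2_DEFAULT;	/* keep the default bits */

	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;	/* add the traps KVM wants */
	write_sysreg(val);
}

static void deactivate_traps(void)
{
	write_sysreg(CPTR_EL2_DEFAULT);		/* was write_sysreg(0, ...) before */
}

int main(void)
{
	activate_traps();
	printf("entry: 0x%08x\n", cptr_el2);	/* 0x001037ff */
	deactivate_traps();
	printf("exit:  0x%08x\n", cptr_el2);	/* 0x000033ff */
	return 0;
}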
@@ -27,7 +27,11 @@
 
 #define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET	0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		*fsr = 0x14;
 }
 
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+	u64 exc_offset;
+
+	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+	case PSR_MODE_EL1t:
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+		break;
+	case PSR_MODE_EL1h:
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+		break;
+	case PSR_MODE_EL0t:
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+		break;
+	default:
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+	}
+
+	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
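
get_except_vector() replaces the single hard-coded sync offset: the vector entry now depends on where the exception is taken from (same EL with SP_EL0, same EL with SP_ELx, lower EL in AArch64, lower EL in AArch32, at 0x0/0x200/0x400/0x600 from VBAR_EL1), plus the per-type offset from the new enum. A stand-alone sketch of that lookup, with the PSR_MODE_* encodings copied from the arm64 uapi headers and a plain integer standing in for the vcpu's PSTATE:

#include <stdio.h>

#define PSR_MODE_EL0t	0x00000000
#define PSR_MODE_EL1t	0x00000004
#define PSR_MODE_EL1h	0x00000005
#define PSR_MODE_MASK	0x0000000f
#define PSR_MODE32_BIT	0x00000010

enum exception_type {
	except_type_sync   = 0,
	except_type_irq    = 0x80,
	except_type_fiq    = 0x100,
	except_type_serror = 0x180,
};

/* Mirrors the switch in get_except_vector(): PSTATE -> offset from VBAR_EL1 */
static unsigned long except_vector_offset(unsigned int pstate,
					  enum exception_type type)
{
	unsigned long exc_offset;

	switch (pstate & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = 0x0;	/* CURRENT_EL_SP_EL0_VECTOR */
		break;
	case PSR_MODE_EL1h:
		exc_offset = 0x200;	/* CURRENT_EL_SP_ELx_VECTOR */
		break;
	case PSR_MODE_EL0t:
		exc_offset = 0x400;	/* LOWER_EL_AArch64_VECTOR */
		break;
	default:
		exc_offset = 0x600;	/* LOWER_EL_AArch32_VECTOR */
	}

	return exc_offset + type;
}

int main(void)
{
	/* Fault from EL0t -> 0x400, from EL1h -> 0x200, from AArch32 SVC -> 0x600 */
	printf("0x%lx 0x%lx 0x%lx\n",
	       except_vector_offset(PSR_MODE_EL0t, except_type_sync),
	       except_vector_offset(PSR_MODE_EL1h, except_type_sync),
	       except_vector_offset(0x13, except_type_sync));
	return 0;
}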
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	/*
 	 * Build an unknown exception, depending on the instruction
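
Both injection paths now share the same sequence: save the faulting PSTATE into SPSR_EL1 and the return address into ELR_EL1, point the PC at the vector chosen by get_except_vector(), then switch PSTATE to EL1h with exceptions masked. A stand-alone sketch of that sequence over a toy register file; the struct, field names and the 0x3c5 PSTATE value (EL1h plus the D/A/I/F masks, my arithmetic from the PSTATE_FAULT_BITS_64 definition above) are local to the example:

#include <stdio.h>

/* Toy guest register state, just enough to show the injection order */
struct toy_vcpu {
	unsigned long pc;
	unsigned long pstate;
	unsigned long elr_el1;
	unsigned long spsr_el1;
	unsigned long vbar_el1;
	unsigned long far_el1;
};

#define TOY_PSTATE_FAULT_BITS_64	0x3c5	/* PSR_MODE_EL1h | D | A | I | F */

static void toy_inject_sync_abort(struct toy_vcpu *v, unsigned long exc_offset,
				  unsigned long fault_addr)
{
	v->spsr_el1 = v->pstate;			/* preserve the faulting context  */
	v->elr_el1  = v->pc;				/* where the guest will return to */
	v->pc       = v->vbar_el1 + exc_offset;		/* vector picked per source mode  */
	v->pstate   = TOY_PSTATE_FAULT_BITS_64;		/* enter EL1h, exceptions masked  */
	v->far_el1  = fault_addr;			/* faulting address for the guest */
}

int main(void)
{
	struct toy_vcpu v = { .pc = 0x400000, .pstate = 0x0 /* EL0t */,
			      .vbar_el1 = 0x80000000 };

	toy_inject_sync_abort(&v, 0x400 /* lower EL, AArch64 */, 0xdead0000);
	printf("pc=0x%lx elr=0x%lx spsr=0x%lx far=0x%lx\n",
	       v.pc, v.elr_el1, v.spsr_el1, v.far_el1);	/* pc=0x80000400 ... */
	return 0;
}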
@@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			/* Handled */
+			return 0;
 		}
-
-		/* Handled */
-		return 0;
 	}
 
 	/* Not handled */
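
As this hunk reads, the early "Handled" return in emulate_cp() now only happens when the register's access callback succeeds and the trapped instruction has been skipped; a failing callback falls through to the "Not handled" path instead of being reported as handled. A stand-alone sketch of that before/after behaviour with stand-in types (the struct and stubs are local to the example, not KVM's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct reg_desc {
	bool (*access)(void);	/* stand-in for r->access(vcpu, params, r) */
};

static void skip_instr(void)	/* stand-in for kvm_skip_instr() */
{
}

/* Shape of emulate_cp() after the fix */
static int emulate_cp_like(const struct reg_desc *r)
{
	if (r) {
		if (r->access()) {
			skip_instr();	/* Skip instruction, since it was emulated */
			return 0;	/* Handled */
		}
	}

	return -1;			/* Not handled */
}

static bool access_ok(void)   { return true;  }
static bool access_fail(void) { return false; }

int main(void)
{
	struct reg_desc good = { .access = access_ok };
	struct reg_desc bad  = { .access = access_fail };

	/* Before the fix the middle case also returned 0; prints "0 -1 -1" */
	printf("%d %d %d\n", emulate_cp_like(&good),
	       emulate_cp_like(&bad), emulate_cp_like(NULL));
	return 0;
}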
@@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 }
 
 /**
- * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
@@ -1095,7 +1094,7 @@ out:
 }
 
 /**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */