KVM: PPC: Book3S PR: Fix VSX handling
This fixes various issues in how we were handling the VSX registers that exist on POWER7 machines. First, we were running off the end of the current->thread.fpr[] array. Ultimately this was because the vcpu->arch.vsr[] array is sized to be able to store both the FP registers and the extra VSX registers (i.e. 64 entries), but PR KVM only uses it for the extra VSX registers (i.e. 32 entries).

Secondly, calling load_up_vsx() from C code is a really bad idea, because it jumps to fast_exception_return at the end, rather than returning with a blr instruction. This was causing it to jump off to a random location with random register contents, since it was using the largely uninitialized stack frame created by kvmppc_load_up_vsx.

In fact, it isn't necessary to call either __giveup_vsx or load_up_vsx, since giveup_fpu and load_up_fpu handle the extra VSX registers as well as the standard FP registers on machines with VSX. Also, since VSX instructions can access the VMX registers and the FP registers as well as the extra VSX registers, we have to load up the FP and VMX registers before we can turn on the MSR_VSX bit for the guest. Conversely, if we save away any of the VSX or FP registers, we have to turn off MSR_VSX for the guest.

To handle all this, it is more convenient for a single call to kvmppc_giveup_ext() to handle all the state saving that needs to be done, so we make it take a set of MSR bits rather than just one, and the switch statement becomes a series of if statements. Similarly, kvmppc_handle_ext needs to be able to load up more than one set of registers.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
commit 28c483b62f (parent b0a94d4e23)
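The heart of the rework is that kvmppc_giveup_ext() and kvmppc_handle_ext() now operate on a set of MSR facility bits: a VSX request is first widened to include FP and VMX, and the result is then masked against what the guest currently owns. Below is a minimal, standalone C sketch of that masking idea only; the MSR_* values and the `owned` variable are illustrative placeholders, not the kernel's definitions.

/*
 * Standalone sketch of the mask logic used by the reworked
 * kvmppc_giveup_ext()/kvmppc_handle_ext() in the diff below.
 * The bit values are illustrative placeholders, not the real
 * PowerPC MSR layout.
 */
#include <stdio.h>

#define MSR_FP   0x1UL
#define MSR_VEC  0x2UL
#define MSR_VSX  0x4UL

static unsigned long expand_and_mask(unsigned long msr, unsigned long owned)
{
        /* VSX instructions can touch FP and VMX state, so giving up
         * (or loading up) VSX implies doing the same for FP and VMX. */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        /* Only the facilities in 'owned' actually need to be handled. */
        return msr & owned;
}

int main(void)
{
        unsigned long owned = MSR_FP | MSR_VSX; /* example ownership state */

        /* Giving up VSX alone still covers FP (owned) but skips VMX
         * (not owned); prints "to save: 0x5" with these placeholders. */
        printf("to save: 0x%lx\n", expand_and_mask(MSR_VSX, owned));
        return 0;
}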
arch/powerpc/include/asm/reg.h
@@ -518,6 +518,7 @@
 #define SRR1_WS_DEEPER   0x00020000 /* Some resources not maintained */
 #define SRR1_WS_DEEP     0x00010000 /* All resources maintained */
 #define SRR1_PROGFPE     0x00100000 /* Floating Point Enabled */
+#define SRR1_PROGILL     0x00080000 /* Illegal instruction */
 #define SRR1_PROGPRIV    0x00040000 /* Privileged instruction */
 #define SRR1_PROGTRAP    0x00020000 /* Trap */
 #define SRR1_PROGADDR    0x00010000 /* SRR0 contains subsequent addr */
arch/powerpc/kvm/book3s_exports.c
@@ -28,8 +28,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
 #endif
-#ifdef CONFIG_VSX
-EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
-#endif
 #endif
 
arch/powerpc/kvm/book3s_pr.c
@@ -81,9 +81,7 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
         svcpu_put(svcpu);
 #endif
 
-        kvmppc_giveup_ext(vcpu, MSR_FP);
-        kvmppc_giveup_ext(vcpu, MSR_VEC);
-        kvmppc_giveup_ext(vcpu, MSR_VSX);
+        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
         vcpu->cpu = -1;
 }
 
@@ -433,10 +431,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 static inline int get_fpr_index(int i)
 {
-#ifdef CONFIG_VSX
-        i *= 2;
-#endif
-        return i;
+        return i * TS_FPRWIDTH;
 }
 
 /* Give up external provider (FPU, Altivec, VSX) */
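The get_fpr_index() change above encodes the layout that the hunks below rely on: on VSX-capable kernels each slot of current->thread.fpr[] is TS_FPRWIDTH (2) doublewords wide, so FP register i is read via thread_fpr[get_fpr_index(i)] and the added VSX doubleword via get_fpr_index(i) + 1. A minimal sketch of that index math, assuming TS_FPRWIDTH is 2 as on CONFIG_VSX builds (the stored values are made up for illustration):

/*
 * Sketch of the flat fpr[] indexing addressed by get_fpr_index();
 * with TS_FPRWIDTH == 2, FP register i sits at index 2*i and the
 * matching extra VSX doubleword at 2*i + 1.
 */
#include <stdio.h>

#define TS_FPRWIDTH 2
#define NUM_FPRS    32

static int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

int main(void)
{
        unsigned long long thread_fpr[NUM_FPRS * TS_FPRWIDTH] = { 0 };

        /* Illustrative values only: FP register 5 and its extra VSX half. */
        thread_fpr[get_fpr_index(5)]     = 0x0123456789abcdefULL;
        thread_fpr[get_fpr_index(5) + 1] = 0xfedcba9876543210ULL;

        printf("fp[5] at %d, extra VSX doubleword at %d\n",
               get_fpr_index(5), get_fpr_index(5) + 1);
        return 0;
}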
@@ -450,41 +445,49 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
         u64 *thread_fpr = (u64*)t->fpr;
         int i;
 
-        if (!(vcpu->arch.guest_owned_ext & msr))
+        /*
+         * VSX instructions can access FP and vector registers, so if
+         * we are giving up VSX, make sure we give up FP and VMX as well.
+         */
+        if (msr & MSR_VSX)
+                msr |= MSR_FP | MSR_VEC;
+
+        msr &= vcpu->arch.guest_owned_ext;
+        if (!msr)
                 return;
 
 #ifdef DEBUG_EXT
         printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
 #endif
 
-        switch (msr) {
-        case MSR_FP:
+        if (msr & MSR_FP) {
+                /*
+                 * Note that on CPUs with VSX, giveup_fpu stores
+                 * both the traditional FP registers and the added VSX
+                 * registers into thread.fpr[].
+                 */
                 giveup_fpu(current);
                 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                         vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
 
                 vcpu->arch.fpscr = t->fpscr.val;
-                break;
-        case MSR_VEC:
+
+#ifdef CONFIG_VSX
+                if (cpu_has_feature(CPU_FTR_VSX))
+                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+        }
+
 #ifdef CONFIG_ALTIVEC
+        if (msr & MSR_VEC) {
                 giveup_altivec(current);
                 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
                 vcpu->arch.vscr = t->vscr;
-#endif
-                break;
-        case MSR_VSX:
-#ifdef CONFIG_VSX
-                __giveup_vsx(current);
-                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
-                        vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
-                break;
-        default:
-                BUG();
         }
+#endif
 
-        vcpu->arch.guest_owned_ext &= ~msr;
-        current->thread.regs->msr &= ~msr;
+        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
         kvmppc_recalc_shadow_msr(vcpu);
 }
 
@@ -544,47 +547,56 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                 return RESUME_GUEST;
         }
 
-        /* We already own the ext */
-        if (vcpu->arch.guest_owned_ext & msr) {
-                return RESUME_GUEST;
+        if (msr == MSR_VSX) {
+                /* No VSX? Give an illegal instruction interrupt */
+#ifdef CONFIG_VSX
+                if (!cpu_has_feature(CPU_FTR_VSX))
+#endif
+                {
+                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+                        return RESUME_GUEST;
+                }
+
+                /*
+                 * We have to load up all the FP and VMX registers before
+                 * we can let the guest use VSX instructions.
+                 */
+                msr = MSR_FP | MSR_VEC | MSR_VSX;
         }
 
+        /* See if we already own all the ext(s) needed */
+        msr &= ~vcpu->arch.guest_owned_ext;
+        if (!msr)
+                return RESUME_GUEST;
+
 #ifdef DEBUG_EXT
         printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
 #endif
 
         current->thread.regs->msr |= msr;
 
-        switch (msr) {
-        case MSR_FP:
+        if (msr & MSR_FP) {
                 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                         thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-
+#ifdef CONFIG_VSX
+                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+#endif
                 t->fpscr.val = vcpu->arch.fpscr;
                 t->fpexc_mode = 0;
                 kvmppc_load_up_fpu();
-                break;
-        case MSR_VEC:
+        }
+
+        if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
                 memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
                 t->vscr = vcpu->arch.vscr;
                 t->vrsave = -1;
                 kvmppc_load_up_altivec();
 #endif
-                break;
-        case MSR_VSX:
-#ifdef CONFIG_VSX
-                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
-                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-                kvmppc_load_up_vsx();
-#endif
-                break;
-        default:
-                BUG();
         }
 
         vcpu->arch.guest_owned_ext |= msr;
-
         kvmppc_recalc_shadow_msr(vcpu);
 
         return RESUME_GUEST;
@@ -1134,7 +1146,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         /* Save VSX state in stack */
         used_vsr = current->thread.used_vsr;
         if (used_vsr && (current->thread.regs->msr & MSR_VSX))
-                        __giveup_vsx(current);
+                __giveup_vsx(current);
 #endif
 
         /* Remember the MSR with disabled extensions */
@@ -1151,14 +1163,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         /* No need for kvm_guest_exit. It's done in handle_exit.
            We also get here with interrupts enabled. */
 
+        /* Make sure we save the guest FPU/Altivec/VSX state */
+        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+
         current->thread.regs->msr = ext_msr;
 
-        /* Make sure we save the guest FPU/Altivec/VSX state */
-        kvmppc_giveup_ext(vcpu, MSR_FP);
-        kvmppc_giveup_ext(vcpu, MSR_VEC);
-        kvmppc_giveup_ext(vcpu, MSR_VSX);
-
-        /* Restore FPU state from stack */
+        /* Restore FPU/VSX state from stack */
         memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
         current->thread.fpscr.val = fpscr;
         current->thread.fpexc_mode = fpexc_mode;
arch/powerpc/kvm/book3s_rmhandlers.S
@@ -234,8 +234,5 @@ define_load_up(fpu)
 #ifdef CONFIG_ALTIVEC
 define_load_up(altivec)
 #endif
-#ifdef CONFIG_VSX
-define_load_up(vsx)
-#endif
 
 #include "book3s_segment.S"