forked from Minki/linux

commit b73117c493

Merge branch 'kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-queue

Conflicts:
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
	arch/powerpc/kvm/booke.c
@@ -1838,6 +1838,7 @@ registers, find a list below:
  PPC   | KVM_REG_PPC_LPCR          | 64
  PPC   | KVM_REG_PPC_PPR           | 64
  PPC   | KVM_REG_PPC_ARCH_COMPAT   | 32
  PPC   | KVM_REG_PPC_DABRX         | 32
  PPC   | KVM_REG_PPC_TM_GPR0       | 64
          ...
  PPC   | KVM_REG_PPC_TM_GPR31      | 64
@@ -343,6 +343,8 @@ config PPC_TRANSACTIONAL_MEM
	bool "Transactional Memory support for POWERPC"
	depends on PPC_BOOK3S_64
	depends on SMP
	select ALTIVEC
	select VSX
	default n
	---help---
	  Support user-mode Transactional Memory on POWERPC.
@@ -460,5 +460,116 @@ static inline unsigned int ev_idle(void)
	return r3;
}

#ifdef CONFIG_EPAPR_PARAVIRT
static inline unsigned long epapr_hypercall(unsigned long *in,
					    unsigned long *out,
					    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl	epapr_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
#else
static unsigned long epapr_hypercall(unsigned long *in,
				     unsigned long *out,
				     unsigned long nr)
{
	return EV_UNIMPLEMENTED;
}
#endif

static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
{
	unsigned long in[8];
	unsigned long out[8];
	unsigned long r;

	r = epapr_hypercall(in, out, nr);
	*r2 = out[0];

	return r;
}

static inline long epapr_hypercall0(unsigned int nr)
{
	unsigned long in[8];
	unsigned long out[8];

	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
				    unsigned long p2)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
				    unsigned long p2, unsigned long p3)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
				    unsigned long p2, unsigned long p3,
				    unsigned long p4)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	in[3] = p4;
	return epapr_hypercall(in, out, nr);
}
#endif /* !__ASSEMBLY__ */
#endif /* _EPAPR_HCALLS_H */
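The wrappers above implement the ePAPR calling convention generically: up to eight arguments go in r3-r10, the hypercall number in r11, a status code comes back in r3, and results in r4-r11. They replace the kvm_hypercall*() family that this same series removes from kvm_para.h and kvm.c further down. As a rough sketch of how a consumer is expected to use them, a caller might look like the following; the function name is made up for illustration, while KVM_HCALL_TOKEN() and KVM_HC_FEATURES are the tokens the PPC KVM paravirt layer really uses (see the kvm_arch_para_features() hunk later in this commit):

	/* Hypothetical caller, for illustration only. */
	static inline unsigned int example_query_kvm_features(void)
	{
		unsigned long features;

		/* no input arguments, one output word returned via &features */
		if (epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &features))
			return 0;	/* non-zero r3 means the hypercall failed */

		return features;
	}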
@@ -91,14 +91,17 @@
#define BOOK3S_INTERRUPT_FP_UNAVAIL	0x800
#define BOOK3S_INTERRUPT_DECREMENTER	0x900
#define BOOK3S_INTERRUPT_HV_DECREMENTER	0x980
#define BOOK3S_INTERRUPT_DOORBELL	0xa00
#define BOOK3S_INTERRUPT_SYSCALL	0xc00
#define BOOK3S_INTERRUPT_TRACE		0xd00
#define BOOK3S_INTERRUPT_H_DATA_STORAGE	0xe00
#define BOOK3S_INTERRUPT_H_INST_STORAGE	0xe20
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40
#define BOOK3S_INTERRUPT_H_DOORBELL	0xe80
#define BOOK3S_INTERRUPT_PERFMON	0xf00
#define BOOK3S_INTERRUPT_ALTIVEC	0xf20
#define BOOK3S_INTERRUPT_VSX		0xf40
#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL	0xf80

#define BOOK3S_IRQPRIO_SYSTEM_RESET	0
#define BOOK3S_IRQPRIO_DATA_SEGMENT	1
@@ -186,9 +186,6 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
@@ -271,16 +268,25 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
	return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
{
	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
		vcpu->arch.last_inst;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
}

/*
@@ -290,14 +296,7 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 */
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu) - 4;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
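The new kvmppc_need_byteswap()/kvmppc_get_last_inst_internal() pair above is what lets instruction emulation cope with a guest whose endianness differs from the host's: the word cached in vcpu->arch.last_inst keeps the byte order it was fetched with, and is reversed on access only when the guest's MSR_LE bit disagrees with the kernel's. A small stand-alone sketch of that decision, with the vcpu state reduced to just the values the helper looks at (the constants here are simplified stand-ins, assuming a big-endian host kernel):

	/* Reduced illustration of the byteswap decision, not kernel code. */
	#include <stdint.h>
	#include <stdbool.h>

	#define MSR_LE_BIT	0x1UL	/* guest little-endian mode bit */
	#define HOST_MSR_LE	0x0UL	/* assume a big-endian host kernel */

	static inline uint32_t swab32_example(uint32_t x)
	{
		return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
		       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
	}

	/* Guest MSR_LE differs from the host's => reverse the cached opcode. */
	static uint32_t fixup_last_inst(uint32_t last_inst, unsigned long guest_msr)
	{
		bool need_swap = (guest_msr & MSR_LE_BIT) != HOST_MSR_LE;

		return need_swap ? swab32_example(last_inst) : last_inst;
	}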
@@ -88,6 +88,7 @@ struct kvmppc_host_state {
	u8 hwthread_req;
	u8 hwthread_state;
	u8 host_ipi;
	u8 ptid;
	struct kvm_vcpu *kvm_vcpu;
	struct kvmppc_vcore *kvm_vcore;
	unsigned long xics_phys;
@@ -63,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
	return vcpu->arch.xer;
}

static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	/* XXX Would need to check TLB entry */
	return false;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_inst;
@ -288,6 +288,7 @@ struct kvmppc_vcore {
|
||||
int n_woken;
|
||||
int nap_count;
|
||||
int napping_threads;
|
||||
int first_vcpuid;
|
||||
u16 pcpu;
|
||||
u16 last_cpu;
|
||||
u8 vcore_state;
|
||||
@ -298,10 +299,12 @@ struct kvmppc_vcore {
|
||||
u64 stolen_tb;
|
||||
u64 preempt_tb;
|
||||
struct kvm_vcpu *runner;
|
||||
struct kvm *kvm;
|
||||
u64 tb_offset; /* guest timebase - host timebase */
|
||||
ulong lpcr;
|
||||
u32 arch_compat;
|
||||
ulong pcr;
|
||||
ulong dpdes; /* doorbell state (POWER8) */
|
||||
};
|
||||
|
||||
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
|
||||
@ -410,8 +413,7 @@ struct kvm_vcpu_arch {
|
||||
|
||||
ulong gpr[32];
|
||||
|
||||
u64 fpr[32];
|
||||
u64 fpscr;
|
||||
struct thread_fp_state fp;
|
||||
|
||||
#ifdef CONFIG_SPE
|
||||
ulong evr[32];
|
||||
@ -420,12 +422,7 @@ struct kvm_vcpu_arch {
|
||||
u64 acc;
|
||||
#endif
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
vector128 vr[32];
|
||||
vector128 vscr;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
u64 vsr[64];
|
||||
struct thread_vr_state vr;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_BOOKE_HV
|
||||
@ -452,6 +449,7 @@ struct kvm_vcpu_arch {
|
||||
ulong pc;
|
||||
ulong ctr;
|
||||
ulong lr;
|
||||
ulong tar;
|
||||
|
||||
ulong xer;
|
||||
u32 cr;
|
||||
@ -461,13 +459,30 @@ struct kvm_vcpu_arch {
|
||||
ulong guest_owned_ext;
|
||||
ulong purr;
|
||||
ulong spurr;
|
||||
ulong ic;
|
||||
ulong vtb;
|
||||
ulong dscr;
|
||||
ulong amr;
|
||||
ulong uamor;
|
||||
ulong iamr;
|
||||
u32 ctrl;
|
||||
u32 dabrx;
|
||||
ulong dabr;
|
||||
ulong dawr;
|
||||
ulong dawrx;
|
||||
ulong ciabr;
|
||||
ulong cfar;
|
||||
ulong ppr;
|
||||
ulong pspb;
|
||||
ulong fscr;
|
||||
ulong ebbhr;
|
||||
ulong ebbrr;
|
||||
ulong bescr;
|
||||
ulong csigr;
|
||||
ulong tacr;
|
||||
ulong tcscr;
|
||||
ulong acop;
|
||||
ulong wort;
|
||||
ulong shadow_srr1;
|
||||
#endif
|
||||
u32 vrsave; /* also USPRG0 */
|
||||
@ -502,10 +517,33 @@ struct kvm_vcpu_arch {
|
||||
u32 ccr1;
|
||||
u32 dbsr;
|
||||
|
||||
u64 mmcr[3];
|
||||
u64 mmcr[5];
|
||||
u32 pmc[8];
|
||||
u32 spmc[2];
|
||||
u64 siar;
|
||||
u64 sdar;
|
||||
u64 sier;
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
u64 tfhar;
|
||||
u64 texasr;
|
||||
u64 tfiar;
|
||||
|
||||
u32 cr_tm;
|
||||
u64 lr_tm;
|
||||
u64 ctr_tm;
|
||||
u64 amr_tm;
|
||||
u64 ppr_tm;
|
||||
u64 dscr_tm;
|
||||
u64 tar_tm;
|
||||
|
||||
ulong gpr_tm[32];
|
||||
|
||||
struct thread_fp_state fp_tm;
|
||||
|
||||
struct thread_vr_state vr_tm;
|
||||
u32 vrsave_tm; /* also USPRG0 */
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KVM_EXIT_TIMING
|
||||
struct mutex exit_timing_lock;
|
||||
@ -546,6 +584,7 @@ struct kvm_vcpu_arch {
|
||||
#endif
|
||||
gpa_t paddr_accessed;
|
||||
gva_t vaddr_accessed;
|
||||
pgd_t *pgdir;
|
||||
|
||||
u8 io_gpr; /* GPR used as IO source/target */
|
||||
u8 mmio_is_bigendian;
|
||||
@ -603,7 +642,6 @@ struct kvm_vcpu_arch {
|
||||
struct list_head run_list;
|
||||
struct task_struct *run_task;
|
||||
struct kvm_run *kvm_run;
|
||||
pgd_t *pgdir;
|
||||
|
||||
spinlock_t vpa_update_lock;
|
||||
struct kvmppc_vpa vpa;
|
||||
@ -616,9 +654,12 @@ struct kvm_vcpu_arch {
|
||||
spinlock_t tbacct_lock;
|
||||
u64 busy_stolen;
|
||||
u64 busy_preempt;
|
||||
unsigned long intr_msr;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
|
||||
|
||||
/* Values for vcpu->arch.state */
|
||||
#define KVMPPC_VCPU_NOTREADY 0
|
||||
#define KVMPPC_VCPU_RUNNABLE 1
|
||||
|
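The kvm_host.h hunk above folds the guest FP/VMX/VSX state into the host's struct thread_fp_state and struct thread_vr_state, so scalar FP register i now lives in one 64-bit lane of a two-lane VSX row instead of a flat u64 array, and the new VCPU_FPR() macro hides that indexing. A toy model of the layout, for illustration only (field and macro names follow the patch, sizes and the TS_FPROFFSET value are simplified assumptions):

	#include <stdint.h>
	#include <stdio.h>

	#define TS_FPROFFSET 0			/* lane that holds scalar FP values */

	struct thread_fp_state_model {
		uint64_t fpr[32][2];		/* 32 VSX rows, two 64-bit lanes each */
		uint64_t fpscr;
	};

	struct vcpu_arch_model {
		struct thread_fp_state_model fp;
	};

	#define VCPU_FPR(arch, i) ((arch)->fp.fpr[i][TS_FPROFFSET])

	int main(void)
	{
		struct vcpu_arch_model arch = { 0 };

		/* Scalar FP reg 3 and the low half of VSX reg 3 share a slot. */
		VCPU_FPR(&arch, 3) = 0x4008000000000000ULL;	/* 3.0 as an IEEE double */
		printf("vsx[3] lanes: %#llx %#llx\n",
		       (unsigned long long)arch.fp.fpr[3][0],
		       (unsigned long long)arch.fp.fpr[3][1]);
		return 0;
	}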
@ -39,10 +39,6 @@ static inline int kvm_para_available(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
extern unsigned long kvm_hypercall(unsigned long *in,
|
||||
unsigned long *out,
|
||||
unsigned long nr);
|
||||
|
||||
#else
|
||||
|
||||
static inline int kvm_para_available(void)
|
||||
@ -50,82 +46,8 @@ static inline int kvm_para_available(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long kvm_hypercall(unsigned long *in,
|
||||
unsigned long *out,
|
||||
unsigned long nr)
|
||||
{
|
||||
return EV_UNIMPLEMENTED;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
|
||||
{
|
||||
unsigned long in[8];
|
||||
unsigned long out[8];
|
||||
unsigned long r;
|
||||
|
||||
r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
|
||||
*r2 = out[0];
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall0(unsigned int nr)
|
||||
{
|
||||
unsigned long in[8];
|
||||
unsigned long out[8];
|
||||
|
||||
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
|
||||
{
|
||||
unsigned long in[8];
|
||||
unsigned long out[8];
|
||||
|
||||
in[0] = p1;
|
||||
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
|
||||
unsigned long p2)
|
||||
{
|
||||
unsigned long in[8];
|
||||
unsigned long out[8];
|
||||
|
||||
in[0] = p1;
|
||||
in[1] = p2;
|
||||
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3)
|
||||
{
|
||||
unsigned long in[8];
|
||||
unsigned long out[8];
|
||||
|
||||
in[0] = p1;
|
||||
in[1] = p2;
|
||||
in[2] = p3;
|
||||
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3,
|
||||
unsigned long p4)
|
||||
{
|
||||
unsigned long in[8];
|
||||
unsigned long out[8];
|
||||
|
||||
in[0] = p1;
|
||||
in[1] = p2;
|
||||
in[2] = p3;
|
||||
in[3] = p4;
|
||||
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
|
||||
}
|
||||
|
||||
|
||||
static inline unsigned int kvm_arch_para_features(void)
|
||||
{
|
||||
unsigned long r;
|
||||
@ -133,7 +55,7 @@ static inline unsigned int kvm_arch_para_features(void)
|
||||
if (!kvm_para_available())
|
||||
return 0;
|
||||
|
||||
if(kvm_hypercall0_1(KVM_HC_FEATURES, &r))
|
||||
if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
|
||||
return 0;
|
||||
|
||||
return r;
|
||||
|
@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);
|
||||
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
|
||||
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
unsigned int rt, unsigned int bytes,
|
||||
int is_bigendian);
|
||||
int is_default_endian);
|
||||
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
unsigned int rt, unsigned int bytes,
|
||||
int is_bigendian);
|
||||
int is_default_endian);
|
||||
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
u64 val, unsigned int bytes, int is_bigendian);
|
||||
u64 val, unsigned int bytes,
|
||||
int is_default_endian);
|
||||
|
||||
extern int kvmppc_emulate_instruction(struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
@ -455,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
|
||||
trace_hardirqs_on();
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
/*
|
||||
* To avoid races, the caller must have gone directly from having
|
||||
* interrupts fully-enabled to hard-disabled.
|
||||
*/
|
||||
WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
|
||||
|
||||
/* Only need to enable IRQs by hard enabling them after this */
|
||||
local_paca->irq_happened = 0;
|
||||
local_paca->soft_enabled = 1;
|
||||
|
@ -223,6 +223,27 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
|
||||
#endif
|
||||
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
|
||||
unsigned *shift);
|
||||
|
||||
static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
|
||||
unsigned long *pte_sizep)
|
||||
{
|
||||
pte_t *ptep;
|
||||
unsigned long ps = *pte_sizep;
|
||||
unsigned int shift;
|
||||
|
||||
ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
|
||||
if (!ptep)
|
||||
return NULL;
|
||||
if (shift)
|
||||
*pte_sizep = 1ul << shift;
|
||||
else
|
||||
*pte_sizep = PAGE_SIZE;
|
||||
|
||||
if (ps > *pte_sizep)
|
||||
return NULL;
|
||||
|
||||
return ptep;
|
||||
}
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
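The lookup_linux_ptep() helper added above wraps find_linux_pte_or_hugepte(): it reports the actual backing page size through *pte_sizep (PAGE_SIZE or 1 << shift for a huge page) and returns NULL if the area is unmapped or the backing page is smaller than the size the caller asked for. A hedged sketch of a caller, loosely modelled on how the HV real-mode MMU code uses it; the function and variable names are made up, and a real caller must hold the locks that keep the PTE from being freed underneath it:

	static int example_translate(pgd_t *pgdir, unsigned long hva,
				     unsigned long psize)
	{
		unsigned long pte_size = psize;		/* minimum size we need */
		pte_t *ptep;

		ptep = lookup_linux_ptep(pgdir, hva, &pte_size);
		if (!ptep)
			return -EFAULT;	/* unmapped, or backing page too small */

		/* pte_size now holds the actual backing page size */
		return 0;
	}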
@ -223,17 +223,26 @@
|
||||
#define CTRL_TE 0x00c00000 /* thread enable */
|
||||
#define CTRL_RUNLATCH 0x1
|
||||
#define SPRN_DAWR 0xB4
|
||||
#define SPRN_CIABR 0xBB
|
||||
#define CIABR_PRIV 0x3
|
||||
#define CIABR_PRIV_USER 1
|
||||
#define CIABR_PRIV_SUPER 2
|
||||
#define CIABR_PRIV_HYPER 3
|
||||
#define SPRN_DAWRX 0xBC
|
||||
#define DAWRX_USER (1UL << 0)
|
||||
#define DAWRX_KERNEL (1UL << 1)
|
||||
#define DAWRX_HYP (1UL << 2)
|
||||
#define DAWRX_USER __MASK(0)
|
||||
#define DAWRX_KERNEL __MASK(1)
|
||||
#define DAWRX_HYP __MASK(2)
|
||||
#define DAWRX_WTI __MASK(3)
|
||||
#define DAWRX_WT __MASK(4)
|
||||
#define DAWRX_DR __MASK(5)
|
||||
#define DAWRX_DW __MASK(6)
|
||||
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
|
||||
#define SPRN_DABR2 0x13D /* e300 */
|
||||
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
|
||||
#define DABRX_USER (1UL << 0)
|
||||
#define DABRX_KERNEL (1UL << 1)
|
||||
#define DABRX_HYP (1UL << 2)
|
||||
#define DABRX_BTI (1UL << 3)
|
||||
#define DABRX_USER __MASK(0)
|
||||
#define DABRX_KERNEL __MASK(1)
|
||||
#define DABRX_HYP __MASK(2)
|
||||
#define DABRX_BTI __MASK(3)
|
||||
#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
|
||||
#define SPRN_DAR 0x013 /* Data Address Register */
|
||||
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
|
||||
@ -260,6 +269,8 @@
|
||||
#define SPRN_HRMOR 0x139 /* Real mode offset register */
|
||||
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
|
||||
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
|
||||
#define SPRN_IC 0x350 /* Virtual Instruction Count */
|
||||
#define SPRN_VTB 0x351 /* Virtual Time Base */
|
||||
/* HFSCR and FSCR bit numbers are the same */
|
||||
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
|
||||
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
|
||||
@ -298,9 +309,13 @@
|
||||
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
|
||||
#define LPCR_RMLS_SH (63-37)
|
||||
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
|
||||
#define LPCR_AIL 0x01800000 /* Alternate interrupt location */
|
||||
#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
|
||||
#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
|
||||
#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
|
||||
#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */
|
||||
#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */
|
||||
#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */
|
||||
#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */
|
||||
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
|
||||
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
|
||||
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
|
||||
@ -322,6 +337,8 @@
|
||||
#define SPRN_PCR 0x152 /* Processor compatibility register */
|
||||
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
|
||||
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
|
||||
#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
|
||||
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
|
||||
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
|
||||
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
|
||||
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
|
||||
@ -368,6 +385,8 @@
|
||||
#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
|
||||
#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
|
||||
#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
|
||||
#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */
|
||||
#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
|
||||
#define SPRN_EAR 0x11A /* External Address Register */
|
||||
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
|
||||
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
|
||||
@ -427,6 +446,7 @@
|
||||
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
|
||||
#define SPRN_IABR2 0x3FA /* 83xx */
|
||||
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
|
||||
#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */
|
||||
#define SPRN_HID4 0x3F4 /* 970 HID4 */
|
||||
#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
|
||||
#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
|
||||
@ -541,6 +561,7 @@
|
||||
#define SPRN_PIR 0x3FF /* Processor Identification Register */
|
||||
#endif
|
||||
#define SPRN_TIR 0x1BE /* Thread Identification Register */
|
||||
#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */
|
||||
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
|
||||
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
|
||||
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
|
||||
@ -682,6 +703,7 @@
|
||||
#define SPRN_EBBHR 804 /* Event based branch handler register */
|
||||
#define SPRN_EBBRR 805 /* Event based branch return register */
|
||||
#define SPRN_BESCR 806 /* Branch event status and control register */
|
||||
#define SPRN_WORT 895 /* Workload optimization register - thread */
|
||||
|
||||
#define SPRN_PMC1 787
|
||||
#define SPRN_PMC2 788
|
||||
@ -698,6 +720,11 @@
|
||||
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
|
||||
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
|
||||
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
|
||||
#define SPRN_TACR 888
|
||||
#define SPRN_TCSCR 889
|
||||
#define SPRN_CSIGR 890
|
||||
#define SPRN_SPMC1 892
|
||||
#define SPRN_SPMC2 893
|
||||
|
||||
/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
|
||||
#define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
|
||||
|
@ -25,10 +25,8 @@ static inline void save_tar(struct thread_struct *prev)
|
||||
static inline void save_tar(struct thread_struct *prev) {}
|
||||
#endif
|
||||
|
||||
extern void load_up_fpu(void);
|
||||
extern void enable_kernel_fp(void);
|
||||
extern void enable_kernel_altivec(void);
|
||||
extern void load_up_altivec(struct task_struct *);
|
||||
extern int emulate_altivec(struct pt_regs *);
|
||||
extern void __giveup_vsx(struct task_struct *);
|
||||
extern void giveup_vsx(struct task_struct *);
|
||||
|
@ -545,6 +545,7 @@ struct kvm_get_htab_header {
|
||||
#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
|
||||
#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
|
||||
#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
|
||||
#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
|
||||
|
||||
#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
|
||||
#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
|
||||
@ -553,6 +554,8 @@ struct kvm_get_htab_header {
|
||||
/* Architecture compatibility level */
|
||||
#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
|
||||
|
||||
#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
|
||||
|
||||
/* Transactional Memory checkpointed state:
|
||||
* This is all GPRs, all VSX regs and a subset of SPRs
|
||||
*/
|
||||
|
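Each one-reg ID above encodes the architecture, the register width (KVM_REG_SIZE_U32 or _U64), and an index, so userspace can read the newly exposed registers with the ordinary KVM_GET_ONE_REG ioctl. A minimal userspace sketch, assuming a vcpu file descriptor is already open and with error handling trimmed:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read KVM_REG_PPC_DABRX, one of the 32-bit registers added here. */
	static uint32_t example_read_dabrx(int vcpu_fd)
	{
		uint32_t dabrx = 0;
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_DABRX,
			.addr = (uintptr_t)&dabrx,
		};

		ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
		return dabrx;
	}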
@ -6,6 +6,8 @@
|
||||
* the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
|
||||
*/
|
||||
#define TM_CAUSE_PERSISTENT 0x01
|
||||
#define TM_CAUSE_KVM_RESCHED 0xe0 /* From PAPR */
|
||||
#define TM_CAUSE_KVM_FAC_UNAV 0xe2 /* From PAPR */
|
||||
#define TM_CAUSE_RESCHED 0xde
|
||||
#define TM_CAUSE_TLBI 0xdc
|
||||
#define TM_CAUSE_FAC_UNAV 0xda
|
||||
|
@ -425,18 +425,14 @@ int main(void)
|
||||
DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
|
||||
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
|
||||
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
|
||||
DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
|
||||
DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
|
||||
DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
|
||||
DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
|
||||
#endif
|
||||
#ifdef CONFIG_VSX
|
||||
DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
|
||||
DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
|
||||
#endif
|
||||
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
|
||||
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
|
||||
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
|
||||
DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
|
||||
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
|
||||
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
|
||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||
@ -484,16 +480,24 @@ int main(void)
|
||||
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
|
||||
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
|
||||
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
|
||||
DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
|
||||
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
|
||||
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
|
||||
DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
|
||||
DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
|
||||
DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
|
||||
DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
|
||||
DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
|
||||
DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
|
||||
DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
|
||||
DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
|
||||
DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
|
||||
DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
|
||||
DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
|
||||
DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
|
||||
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
|
||||
DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
|
||||
DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
|
||||
@ -502,8 +506,10 @@ int main(void)
|
||||
DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
|
||||
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
|
||||
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
|
||||
DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
|
||||
DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
|
||||
DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
|
||||
DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
|
||||
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
|
||||
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
|
||||
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
|
||||
@ -511,20 +517,47 @@ int main(void)
|
||||
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
|
||||
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
|
||||
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
|
||||
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
|
||||
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
|
||||
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
|
||||
DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
|
||||
DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
|
||||
DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
|
||||
DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
|
||||
DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
|
||||
DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
|
||||
DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
|
||||
DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
|
||||
DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
|
||||
DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
|
||||
DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
|
||||
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
|
||||
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
|
||||
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
|
||||
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
|
||||
DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
|
||||
DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
|
||||
DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
|
||||
DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
|
||||
DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
|
||||
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
|
||||
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
|
||||
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
|
||||
DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
|
||||
DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
|
||||
DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
|
||||
DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
|
||||
DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
|
||||
DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
|
||||
DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
|
||||
DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
|
||||
DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
|
||||
DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
|
||||
DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
|
||||
DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
|
||||
DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
||||
@ -589,6 +622,7 @@ int main(void)
|
||||
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
|
||||
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
|
||||
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
|
||||
HSTATE_FIELD(HSTATE_PTID, ptid);
|
||||
HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
|
||||
HSTATE_FIELD(HSTATE_PMC, host_pmc);
|
||||
HSTATE_FIELD(HSTATE_PURR, host_purr);
|
||||
|
@ -413,13 +413,13 @@ static void kvm_map_magic_page(void *data)
|
||||
{
|
||||
u32 *features = data;
|
||||
|
||||
ulong in[8];
|
||||
ulong in[8] = {0};
|
||||
ulong out[8];
|
||||
|
||||
in[0] = KVM_MAGIC_PAGE;
|
||||
in[1] = KVM_MAGIC_PAGE;
|
||||
|
||||
kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
|
||||
epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
|
||||
|
||||
*features = out[0];
|
||||
}
|
||||
@ -711,43 +711,6 @@ static void kvm_use_magic_page(void)
|
||||
kvm_patching_worked ? "worked" : "failed");
|
||||
}
|
||||
|
||||
unsigned long kvm_hypercall(unsigned long *in,
|
||||
unsigned long *out,
|
||||
unsigned long nr)
|
||||
{
|
||||
unsigned long register r0 asm("r0");
|
||||
unsigned long register r3 asm("r3") = in[0];
|
||||
unsigned long register r4 asm("r4") = in[1];
|
||||
unsigned long register r5 asm("r5") = in[2];
|
||||
unsigned long register r6 asm("r6") = in[3];
|
||||
unsigned long register r7 asm("r7") = in[4];
|
||||
unsigned long register r8 asm("r8") = in[5];
|
||||
unsigned long register r9 asm("r9") = in[6];
|
||||
unsigned long register r10 asm("r10") = in[7];
|
||||
unsigned long register r11 asm("r11") = nr;
|
||||
unsigned long register r12 asm("r12");
|
||||
|
||||
asm volatile("bl epapr_hypercall_start"
|
||||
: "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
|
||||
"=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
|
||||
"=r"(r12)
|
||||
: "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
|
||||
"r"(r9), "r"(r10), "r"(r11)
|
||||
: "memory", "cc", "xer", "ctr", "lr");
|
||||
|
||||
out[0] = r4;
|
||||
out[1] = r5;
|
||||
out[2] = r6;
|
||||
out[3] = r7;
|
||||
out[4] = r8;
|
||||
out[5] = r9;
|
||||
out[6] = r10;
|
||||
out[7] = r11;
|
||||
|
||||
return r3;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_hypercall);
|
||||
|
||||
static __init void kvm_free_tmp(void)
|
||||
{
|
||||
free_reserved_area(&kvm_tmp[kvm_tmp_index],
|
||||
|
@ -21,6 +21,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/miscdevice.h>
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
@ -231,3 +233,5 @@ static void __exit kvmppc_44x_exit(void)
|
||||
|
||||
module_init(kvmppc_44x_init);
|
||||
module_exit(kvmppc_44x_exit);
|
||||
MODULE_ALIAS_MISCDEV(KVM_MINOR);
|
||||
MODULE_ALIAS("devname:kvm");
|
||||
|
@ -18,6 +18,8 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/miscdevice.h>
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
@ -575,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
||||
break;
|
||||
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
|
||||
i = reg->id - KVM_REG_PPC_FPR0;
|
||||
val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
|
||||
val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
|
||||
break;
|
||||
case KVM_REG_PPC_FPSCR:
|
||||
val = get_reg_val(reg->id, vcpu->arch.fpscr);
|
||||
val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
|
||||
break;
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
|
||||
@ -586,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
||||
r = -ENXIO;
|
||||
break;
|
||||
}
|
||||
val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
|
||||
val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
|
||||
break;
|
||||
case KVM_REG_PPC_VSCR:
|
||||
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
|
||||
r = -ENXIO;
|
||||
break;
|
||||
}
|
||||
val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
|
||||
val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
|
||||
break;
|
||||
case KVM_REG_PPC_VRSAVE:
|
||||
val = get_reg_val(reg->id, vcpu->arch.vrsave);
|
||||
break;
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
#ifdef CONFIG_VSX
|
||||
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
long int i = reg->id - KVM_REG_PPC_VSR0;
|
||||
val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
|
||||
val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
|
||||
} else {
|
||||
r = -ENXIO;
|
||||
}
|
||||
break;
|
||||
#endif /* CONFIG_VSX */
|
||||
case KVM_REG_PPC_DEBUG_INST: {
|
||||
u32 opcode = INS_TW;
|
||||
r = copy_to_user((u32 __user *)(long)reg->addr,
|
||||
@ -654,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
||||
break;
|
||||
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
|
||||
i = reg->id - KVM_REG_PPC_FPR0;
|
||||
vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
|
||||
VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
|
||||
break;
|
||||
case KVM_REG_PPC_FPSCR:
|
||||
vcpu->arch.fpscr = set_reg_val(reg->id, val);
|
||||
vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
|
||||
break;
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
|
||||
@ -665,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
||||
r = -ENXIO;
|
||||
break;
|
||||
}
|
||||
vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
|
||||
vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
|
||||
break;
|
||||
case KVM_REG_PPC_VSCR:
|
||||
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
|
||||
r = -ENXIO;
|
||||
break;
|
||||
}
|
||||
vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
|
||||
vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
|
||||
break;
|
||||
case KVM_REG_PPC_VRSAVE:
|
||||
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
|
||||
@ -682,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
||||
vcpu->arch.vrsave = set_reg_val(reg->id, val);
|
||||
break;
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
#ifdef CONFIG_VSX
|
||||
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
long int i = reg->id - KVM_REG_PPC_VSR0;
|
||||
vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
|
||||
vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
|
||||
} else {
|
||||
r = -ENXIO;
|
||||
}
|
||||
break;
|
||||
#endif /* CONFIG_VSX */
|
||||
#ifdef CONFIG_KVM_XICS
|
||||
case KVM_REG_PPC_ICP_STATE:
|
||||
if (!vcpu->arch.icp) {
|
||||
@ -879,3 +903,9 @@ static void kvmppc_book3s_exit(void)
|
||||
|
||||
module_init(kvmppc_book3s_init);
|
||||
module_exit(kvmppc_book3s_exit);
|
||||
|
||||
/* On 32bit this is our one and only kernel module */
|
||||
#ifdef CONFIG_KVM_BOOK3S_32
|
||||
MODULE_ALIAS_MISCDEV(KVM_MINOR);
|
||||
MODULE_ALIAS("devname:kvm");
|
||||
#endif
|
||||
|
@ -243,6 +243,11 @@ next_pteg:
|
||||
/* Now tell our Shadow PTE code about the new page */
|
||||
|
||||
pte = kvmppc_mmu_hpte_cache_next(vcpu);
|
||||
if (!pte) {
|
||||
kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
|
||||
r = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
|
||||
orig_pte->may_write ? 'w' : '-',
|
||||
|
@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
|
||||
|
||||
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
|
||||
kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -562,7 +562,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
* we just return and retry the instruction.
|
||||
*/
|
||||
|
||||
if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
|
||||
if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
|
||||
return RESUME_GUEST;
|
||||
|
||||
/*
|
||||
|
@ -25,9 +25,5 @@ EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
|
||||
#endif
|
||||
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
|
||||
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
|
||||
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/page-flags.h>
|
||||
#include <linux/srcu.h>
|
||||
#include <linux/miscdevice.h>
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
@ -85,10 +86,13 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
|
||||
|
||||
/* CPU points to the first thread of the core */
|
||||
if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
|
||||
#ifdef CONFIG_KVM_XICS
|
||||
int real_cpu = cpu + vcpu->arch.ptid;
|
||||
if (paca[real_cpu].kvm_hstate.xics_phys)
|
||||
xics_wake_cpu(real_cpu);
|
||||
else if (cpu_online(cpu))
|
||||
else
|
||||
#endif
|
||||
if (cpu_online(cpu))
|
||||
smp_send_reschedule(cpu);
|
||||
}
|
||||
put_cpu();
|
||||
@ -182,14 +186,28 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
|
||||
|
||||
switch (arch_compat) {
|
||||
case PVR_ARCH_205:
|
||||
pcr = PCR_ARCH_205;
|
||||
/*
|
||||
* If an arch bit is set in PCR, all the defined
|
||||
* higher-order arch bits also have to be set.
|
||||
*/
|
||||
pcr = PCR_ARCH_206 | PCR_ARCH_205;
|
||||
break;
|
||||
case PVR_ARCH_206:
|
||||
case PVR_ARCH_206p:
|
||||
pcr = PCR_ARCH_206;
|
||||
break;
|
||||
case PVR_ARCH_207:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
|
||||
/* POWER7 can't emulate POWER8 */
|
||||
if (!(pcr & PCR_ARCH_206))
|
||||
return -EINVAL;
|
||||
pcr &= ~PCR_ARCH_206;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock(&vc->lock);
|
||||
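The arch-compat logic above maps a requested compatibility level onto Processor Compatibility Register bits: asking for v2.05 on a POWER8 host sets both PCR_ARCH_206 and PCR_ARCH_205 (a set arch bit implies all defined higher-order bits), asking for v2.06 sets only PCR_ARCH_206, and native v2.07 leaves the PCR clear; on a POWER7 host the ARCH_206 behaviour is what the hardware already provides, so that bit must be implied and is stripped before writing, and POWER8 emulation is rejected. A stand-alone sketch of that mapping, with the PCR constants copied from the reg.h hunk earlier in this commit and the PVR_ARCH_* values treated as illustrative placeholders:

	#include <errno.h>
	#include <stdbool.h>

	#define PCR_ARCH_206	0x4	/* Architecture 2.06 */
	#define PCR_ARCH_205	0x2	/* Architecture 2.05 */

	#define PVR_ARCH_205	0x0f000001	/* illustrative logical-PVR values */
	#define PVR_ARCH_206	0x0f000002
	#define PVR_ARCH_206p	0x0f100003
	#define PVR_ARCH_207	0x0f000004

	static int example_arch_compat_to_pcr(unsigned int arch_compat,
					      bool host_is_power8,
					      unsigned long *pcr_out)
	{
		unsigned long pcr = 0;

		switch (arch_compat) {
		case PVR_ARCH_205:
			pcr = PCR_ARCH_206 | PCR_ARCH_205;	/* imply newer bits too */
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			pcr = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			break;					/* run native */
		default:
			return -EINVAL;
		}

		if (!host_is_power8) {
			if (!(pcr & PCR_ARCH_206))
				return -EINVAL;	/* POWER7 can't emulate POWER8 */
			pcr &= ~PCR_ARCH_206;	/* implicit on POWER7 hardware */
		}

		*pcr_out = pcr;
		return 0;
	}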
@ -637,6 +655,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
r = RESUME_GUEST;
|
||||
break;
|
||||
case BOOK3S_INTERRUPT_EXTERNAL:
|
||||
case BOOK3S_INTERRUPT_H_DOORBELL:
|
||||
vcpu->stat.ext_intr_exits++;
|
||||
r = RESUME_GUEST;
|
||||
break;
|
||||
@ -673,12 +692,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
/* hcall - punt to userspace */
|
||||
int i;
|
||||
|
||||
if (vcpu->arch.shregs.msr & MSR_PR) {
|
||||
/* sc 1 from userspace - reflect to guest syscall */
|
||||
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
|
||||
r = RESUME_GUEST;
|
||||
break;
|
||||
}
|
||||
/* hypercall with MSR_PR has already been handled in rmode,
|
||||
* and never reaches here.
|
||||
*/
|
||||
|
||||
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
|
||||
for (i = 0; i < 9; ++i)
|
||||
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
|
||||
@ -708,7 +725,16 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
* we don't emulate any guest instructions at this stage.
|
||||
*/
|
||||
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
|
||||
kvmppc_core_queue_program(vcpu, 0x80000);
|
||||
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
|
||||
r = RESUME_GUEST;
|
||||
break;
|
||||
/*
|
||||
* This occurs if the guest (kernel or userspace), does something that
|
||||
* is prohibited by HFSCR. We just generate a program interrupt to
|
||||
* the guest.
|
||||
*/
|
||||
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
|
||||
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
|
||||
r = RESUME_GUEST;
|
||||
break;
|
||||
default:
|
||||
@ -765,11 +791,35 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
|
||||
u64 mask;
|
||||
|
||||
spin_lock(&vc->lock);
|
||||
/*
|
||||
* If ILE (interrupt little-endian) has changed, update the
|
||||
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
|
||||
*/
|
||||
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
struct kvm_vcpu *vcpu;
|
||||
int i;
|
||||
|
||||
mutex_lock(&kvm->lock);
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
if (vcpu->arch.vcore != vc)
|
||||
continue;
|
||||
if (new_lpcr & LPCR_ILE)
|
||||
vcpu->arch.intr_msr |= MSR_LE;
|
||||
else
|
||||
vcpu->arch.intr_msr &= ~MSR_LE;
|
||||
}
|
||||
mutex_unlock(&kvm->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Userspace can only modify DPFD (default prefetch depth),
|
||||
* ILE (interrupt little-endian) and TC (translation control).
|
||||
* On POWER8 userspace can also modify AIL (alt. interrupt loc.)
|
||||
*/
|
||||
mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_207S))
|
||||
mask |= LPCR_AIL;
|
||||
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
|
||||
spin_unlock(&vc->lock);
|
||||
}
|
||||
@ -787,6 +837,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||
case KVM_REG_PPC_DABR:
|
||||
*val = get_reg_val(id, vcpu->arch.dabr);
|
||||
break;
|
||||
case KVM_REG_PPC_DABRX:
|
||||
*val = get_reg_val(id, vcpu->arch.dabrx);
|
||||
break;
|
||||
case KVM_REG_PPC_DSCR:
|
||||
*val = get_reg_val(id, vcpu->arch.dscr);
|
||||
break;
|
||||
@ -802,7 +855,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||
case KVM_REG_PPC_UAMOR:
|
||||
*val = get_reg_val(id, vcpu->arch.uamor);
|
||||
break;
|
||||
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
|
||||
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
|
||||
i = id - KVM_REG_PPC_MMCR0;
|
||||
*val = get_reg_val(id, vcpu->arch.mmcr[i]);
|
||||
break;
|
||||
@ -810,33 +863,87 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||
i = id - KVM_REG_PPC_PMC1;
|
||||
*val = get_reg_val(id, vcpu->arch.pmc[i]);
|
||||
break;
|
||||
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
|
||||
i = id - KVM_REG_PPC_SPMC1;
|
||||
*val = get_reg_val(id, vcpu->arch.spmc[i]);
|
||||
break;
|
||||
case KVM_REG_PPC_SIAR:
|
||||
*val = get_reg_val(id, vcpu->arch.siar);
|
||||
break;
|
||||
case KVM_REG_PPC_SDAR:
|
||||
*val = get_reg_val(id, vcpu->arch.sdar);
|
||||
break;
|
||||
#ifdef CONFIG_VSX
|
||||
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
/* VSX => FP reg i is stored in arch.vsr[2*i] */
|
||||
long int i = id - KVM_REG_PPC_FPR0;
|
||||
*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
|
||||
} else {
|
||||
/* let generic code handle it */
|
||||
r = -EINVAL;
|
||||
}
|
||||
case KVM_REG_PPC_SIER:
|
||||
*val = get_reg_val(id, vcpu->arch.sier);
|
||||
break;
|
||||
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
long int i = id - KVM_REG_PPC_VSR0;
|
||||
val->vsxval[0] = vcpu->arch.vsr[2 * i];
|
||||
val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
|
||||
} else {
|
||||
r = -ENXIO;
|
||||
}
|
||||
case KVM_REG_PPC_IAMR:
|
||||
*val = get_reg_val(id, vcpu->arch.iamr);
|
||||
break;
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
case KVM_REG_PPC_TFHAR:
|
||||
*val = get_reg_val(id, vcpu->arch.tfhar);
|
||||
break;
|
||||
case KVM_REG_PPC_TFIAR:
|
||||
*val = get_reg_val(id, vcpu->arch.tfiar);
|
||||
break;
|
||||
case KVM_REG_PPC_TEXASR:
|
||||
*val = get_reg_val(id, vcpu->arch.texasr);
|
||||
break;
|
||||
#endif
|
||||
case KVM_REG_PPC_FSCR:
|
||||
*val = get_reg_val(id, vcpu->arch.fscr);
|
||||
break;
|
||||
case KVM_REG_PPC_PSPB:
|
||||
*val = get_reg_val(id, vcpu->arch.pspb);
|
||||
break;
|
||||
case KVM_REG_PPC_EBBHR:
|
||||
*val = get_reg_val(id, vcpu->arch.ebbhr);
|
||||
break;
|
||||
case KVM_REG_PPC_EBBRR:
|
||||
*val = get_reg_val(id, vcpu->arch.ebbrr);
|
||||
break;
|
||||
case KVM_REG_PPC_BESCR:
|
||||
*val = get_reg_val(id, vcpu->arch.bescr);
|
||||
break;
|
||||
case KVM_REG_PPC_TAR:
|
||||
*val = get_reg_val(id, vcpu->arch.tar);
|
||||
break;
|
||||
case KVM_REG_PPC_DPDES:
|
||||
*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
|
||||
break;
|
||||
case KVM_REG_PPC_DAWR:
|
||||
*val = get_reg_val(id, vcpu->arch.dawr);
|
||||
break;
|
||||
case KVM_REG_PPC_DAWRX:
|
||||
*val = get_reg_val(id, vcpu->arch.dawrx);
|
||||
break;
|
||||
case KVM_REG_PPC_CIABR:
|
||||
*val = get_reg_val(id, vcpu->arch.ciabr);
|
||||
break;
|
||||
case KVM_REG_PPC_IC:
|
||||
*val = get_reg_val(id, vcpu->arch.ic);
|
||||
break;
|
||||
case KVM_REG_PPC_VTB:
|
||||
*val = get_reg_val(id, vcpu->arch.vtb);
|
||||
break;
|
||||
case KVM_REG_PPC_CSIGR:
|
||||
*val = get_reg_val(id, vcpu->arch.csigr);
|
||||
break;
|
||||
case KVM_REG_PPC_TACR:
|
||||
*val = get_reg_val(id, vcpu->arch.tacr);
|
||||
break;
|
||||
case KVM_REG_PPC_TCSCR:
|
||||
*val = get_reg_val(id, vcpu->arch.tcscr);
|
||||
break;
|
||||
case KVM_REG_PPC_PID:
|
||||
*val = get_reg_val(id, vcpu->arch.pid);
|
||||
break;
|
||||
case KVM_REG_PPC_ACOP:
|
||||
*val = get_reg_val(id, vcpu->arch.acop);
|
||||
break;
|
||||
case KVM_REG_PPC_WORT:
|
||||
*val = get_reg_val(id, vcpu->arch.wort);
|
||||
break;
|
||||
#endif /* CONFIG_VSX */
|
||||
case KVM_REG_PPC_VPA_ADDR:
|
||||
spin_lock(&vcpu->arch.vpa_update_lock);
|
||||
*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
|
||||
@ -890,6 +997,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||
case KVM_REG_PPC_DABR:
|
||||
vcpu->arch.dabr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_DABRX:
|
||||
vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
|
||||
break;
|
||||
case KVM_REG_PPC_DSCR:
|
||||
vcpu->arch.dscr = set_reg_val(id, *val);
|
||||
break;
|
||||
@ -905,7 +1015,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||
case KVM_REG_PPC_UAMOR:
|
||||
vcpu->arch.uamor = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
|
||||
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
|
||||
i = id - KVM_REG_PPC_MMCR0;
|
||||
vcpu->arch.mmcr[i] = set_reg_val(id, *val);
|
||||
break;
|
||||
@ -913,33 +1023,90 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
|
||||
i = id - KVM_REG_PPC_PMC1;
|
||||
vcpu->arch.pmc[i] = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
|
||||
i = id - KVM_REG_PPC_SPMC1;
|
||||
vcpu->arch.spmc[i] = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_SIAR:
|
||||
vcpu->arch.siar = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_SDAR:
|
||||
vcpu->arch.sdar = set_reg_val(id, *val);
|
||||
break;
|
||||
#ifdef CONFIG_VSX
|
||||
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
/* VSX => FP reg i is stored in arch.vsr[2*i] */
|
||||
long int i = id - KVM_REG_PPC_FPR0;
|
||||
vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
|
||||
} else {
|
||||
/* let generic code handle it */
|
||||
r = -EINVAL;
|
||||
}
|
||||
case KVM_REG_PPC_SIER:
|
||||
vcpu->arch.sier = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
|
||||
if (cpu_has_feature(CPU_FTR_VSX)) {
|
||||
long int i = id - KVM_REG_PPC_VSR0;
|
||||
vcpu->arch.vsr[2 * i] = val->vsxval[0];
|
||||
vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
|
||||
} else {
|
||||
r = -ENXIO;
|
||||
}
|
||||
case KVM_REG_PPC_IAMR:
|
||||
vcpu->arch.iamr = set_reg_val(id, *val);
|
||||
break;
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
case KVM_REG_PPC_TFHAR:
|
||||
vcpu->arch.tfhar = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_TFIAR:
|
||||
vcpu->arch.tfiar = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_TEXASR:
|
||||
vcpu->arch.texasr = set_reg_val(id, *val);
|
||||
break;
|
||||
#endif
|
||||
case KVM_REG_PPC_FSCR:
|
||||
vcpu->arch.fscr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_PSPB:
|
||||
vcpu->arch.pspb = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_EBBHR:
|
||||
vcpu->arch.ebbhr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_EBBRR:
|
||||
vcpu->arch.ebbrr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_BESCR:
|
||||
vcpu->arch.bescr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_TAR:
|
||||
vcpu->arch.tar = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_DPDES:
|
||||
vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_DAWR:
|
||||
vcpu->arch.dawr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_DAWRX:
|
||||
vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
|
||||
break;
|
||||
case KVM_REG_PPC_CIABR:
|
||||
vcpu->arch.ciabr = set_reg_val(id, *val);
|
||||
/* Don't allow setting breakpoints in hypervisor code */
|
||||
if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
|
||||
vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
|
||||
break;
|
||||
case KVM_REG_PPC_IC:
|
||||
vcpu->arch.ic = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_VTB:
|
||||
vcpu->arch.vtb = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_CSIGR:
|
||||
vcpu->arch.csigr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_TACR:
|
||||
vcpu->arch.tacr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_TCSCR:
|
||||
vcpu->arch.tcscr = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_PID:
|
||||
vcpu->arch.pid = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_ACOP:
|
||||
vcpu->arch.acop = set_reg_val(id, *val);
|
||||
break;
|
||||
case KVM_REG_PPC_WORT:
|
||||
vcpu->arch.wort = set_reg_val(id, *val);
|
||||
break;
|
||||
#endif /* CONFIG_VSX */
|
||||
case KVM_REG_PPC_VPA_ADDR:
|
||||
addr = set_reg_val(id, *val);
|
||||
r = -EINVAL;
|
||||
@ -1017,6 +1184,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
|
||||
spin_lock_init(&vcpu->arch.vpa_update_lock);
|
||||
spin_lock_init(&vcpu->arch.tbacct_lock);
|
||||
vcpu->arch.busy_preempt = TB_NIL;
|
||||
vcpu->arch.intr_msr = MSR_SF | MSR_ME;
|
||||
|
||||
kvmppc_mmu_book3s_hv_init(vcpu);
|
||||
|
||||
@ -1034,6 +1202,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
|
||||
init_waitqueue_head(&vcore->wq);
|
||||
vcore->preempt_tb = TB_NIL;
|
||||
vcore->lpcr = kvm->arch.lpcr;
|
||||
vcore->first_vcpuid = core * threads_per_core;
|
||||
vcore->kvm = kvm;
|
||||
}
|
||||
kvm->arch.vcores[core] = vcore;
|
||||
kvm->arch.online_vcores++;
|
||||
@ -1047,6 +1217,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
|
||||
++vcore->num_threads;
|
||||
spin_unlock(&vcore->lock);
|
||||
vcpu->arch.vcore = vcore;
|
||||
vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
|
||||
|
||||
vcpu->arch.cpu_type = KVM_CPU_3S_64;
|
||||
kvmppc_sanity_check(vcpu);
|
||||
@ -1110,7 +1281,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
|
||||
extern void __kvmppc_vcore_entry(void);
|
||||
|
||||
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
|
||||
struct kvm_vcpu *vcpu)
|
||||
@ -1184,13 +1355,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
|
||||
tpaca = &paca[cpu];
|
||||
tpaca->kvm_hstate.kvm_vcpu = vcpu;
|
||||
tpaca->kvm_hstate.kvm_vcore = vc;
|
||||
tpaca->kvm_hstate.napping = 0;
|
||||
tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
|
||||
vcpu->cpu = vc->pcpu;
|
||||
smp_wmb();
|
||||
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
|
||||
if (vcpu->arch.ptid) {
|
||||
if (cpu != smp_processor_id()) {
|
||||
#ifdef CONFIG_KVM_XICS
|
||||
xics_wake_cpu(cpu);
|
||||
++vc->n_woken;
|
||||
#endif
|
||||
if (vcpu->arch.ptid)
|
||||
++vc->n_woken;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@ -1247,10 +1421,10 @@ static int on_primary_thread(void)
|
||||
*/
|
||||
static void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||
{
|
||||
struct kvm_vcpu *vcpu, *vcpu0, *vnext;
|
||||
struct kvm_vcpu *vcpu, *vnext;
|
||||
long ret;
|
||||
u64 now;
|
||||
int ptid, i, need_vpa_update;
|
||||
int i, need_vpa_update;
|
||||
int srcu_idx;
|
||||
struct kvm_vcpu *vcpus_to_update[threads_per_core];
|
||||
|
||||
@ -1287,25 +1461,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||
spin_lock(&vc->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Assign physical thread IDs, first to non-ceded vcpus
|
||||
* and then to ceded ones.
|
||||
*/
|
||||
ptid = 0;
|
||||
vcpu0 = NULL;
|
||||
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
|
||||
if (!vcpu->arch.ceded) {
|
||||
if (!ptid)
|
||||
vcpu0 = vcpu;
|
||||
vcpu->arch.ptid = ptid++;
|
||||
}
|
||||
}
|
||||
if (!vcpu0)
|
||||
goto out; /* nothing to run; should never happen */
|
||||
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
|
||||
if (vcpu->arch.ceded)
|
||||
vcpu->arch.ptid = ptid++;
|
||||
|
||||
/*
|
||||
* Make sure we are running on thread 0, and that
|
||||
* secondary threads are offline.
|
||||
@ -1322,15 +1477,19 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||
kvmppc_create_dtl_entry(vcpu, vc);
|
||||
}
|
||||
|
||||
/* Set this explicitly in case thread 0 doesn't have a vcpu */
|
||||
get_paca()->kvm_hstate.kvm_vcore = vc;
|
||||
get_paca()->kvm_hstate.ptid = 0;
|
||||
|
||||
vc->vcore_state = VCORE_RUNNING;
|
||||
preempt_disable();
|
||||
spin_unlock(&vc->lock);
|
||||
|
||||
kvm_guest_enter();
|
||||
|
||||
srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
|
||||
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
|
||||
|
||||
__kvmppc_vcore_entry(NULL, vcpu0);
|
||||
__kvmppc_vcore_entry();
|
||||
|
||||
spin_lock(&vc->lock);
|
||||
/* disable sending of IPIs on virtual external irqs */
|
||||
@ -1345,7 +1504,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||
vc->vcore_state = VCORE_EXITING;
|
||||
spin_unlock(&vc->lock);
|
||||
|
||||
srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
|
||||
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
|
||||
|
||||
/* make sure updates to secondary vcpu structs are visible now */
|
||||
smp_mb();
|
||||
@ -1453,7 +1612,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
if (!signal_pending(current)) {
|
||||
if (vc->vcore_state == VCORE_RUNNING &&
|
||||
VCORE_EXIT_COUNT(vc) == 0) {
|
||||
vcpu->arch.ptid = vc->n_runnable - 1;
|
||||
kvmppc_create_dtl_entry(vcpu, vc);
|
||||
kvmppc_start_thread(vcpu);
|
||||
} else if (vc->vcore_state == VCORE_SLEEPING) {
|
||||
@ -2048,6 +2206,9 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
|
||||
LPCR_VPM0 | LPCR_VPM1;
|
||||
kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
|
||||
(VRMA_VSID << SLB_VSID_SHIFT_1T);
|
||||
/* On POWER8 turn on online bit to enable PURR/SPURR */
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_207S))
|
||||
lpcr |= LPCR_ONL;
|
||||
}
|
||||
kvm->arch.lpcr = lpcr;
|
||||
|
||||
@ -2222,3 +2383,5 @@ static void kvmppc_book3s_exit_hv(void)
|
||||
module_init(kvmppc_book3s_init_hv);
|
||||
module_exit(kvmppc_book3s_exit_hv);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_MISCDEV(KVM_MINOR);
|
||||
MODULE_ALIAS("devname:kvm");
|
||||
|
@ -35,7 +35,7 @@
|
||||
****************************************************************************/
|
||||
|
||||
/* Registers:
|
||||
* r4: vcpu pointer
|
||||
* none
|
||||
*/
|
||||
_GLOBAL(__kvmppc_vcore_entry)
|
||||
|
||||
@ -57,9 +57,11 @@ BEGIN_FTR_SECTION
|
||||
std r3, HSTATE_DSCR(r13)
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
|
||||
|
||||
BEGIN_FTR_SECTION
|
||||
/* Save host DABR */
|
||||
mfspr r3, SPRN_DABR
|
||||
std r3, HSTATE_DABR(r13)
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
|
||||
|
||||
/* Hard-disable interrupts */
|
||||
mfmsr r10
|
||||
@ -69,7 +71,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
|
||||
mtmsrd r10,1
|
||||
|
||||
/* Save host PMU registers */
|
||||
/* R4 is live here (vcpu pointer) but not r3 or r5 */
|
||||
li r3, 1
|
||||
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
|
||||
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
|
||||
@ -134,16 +135,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
|
||||
* enters the guest with interrupts enabled.
|
||||
*/
|
||||
BEGIN_FTR_SECTION
|
||||
ld r4, HSTATE_KVM_VCPU(r13)
|
||||
ld r0, VCPU_PENDING_EXC(r4)
|
||||
li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
|
||||
oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
|
||||
and. r0, r0, r7
|
||||
beq 32f
|
||||
mr r31, r4
|
||||
lhz r3, PACAPACAINDEX(r13)
|
||||
bl smp_send_reschedule
|
||||
nop
|
||||
mr r4, r31
|
||||
32:
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
|
||||
#endif /* CONFIG_SMP */
|
||||
|
@ -134,7 +134,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
|
||||
unlock_rmap(rmap);
|
||||
}
|
||||
|
||||
static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
|
||||
static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
|
||||
int writing, unsigned long *pte_sizep)
|
||||
{
|
||||
pte_t *ptep;
|
||||
@ -232,7 +232,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
|
||||
|
||||
/* Look up the Linux PTE for the backing page */
|
||||
pte_size = psize;
|
||||
pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
|
||||
pte = lookup_linux_pte_and_update(pgdir, hva, writing,
|
||||
&pte_size);
|
||||
if (pte_present(pte)) {
|
||||
if (writing && !pte_write(pte))
|
||||
/* make the actual HPTE be read-only */
|
||||
@ -672,7 +673,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
|
||||
memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
|
||||
if (memslot) {
|
||||
hva = __gfn_to_hva_memslot(memslot, gfn);
|
||||
pte = lookup_linux_pte(pgdir, hva, 1, &psize);
|
||||
pte = lookup_linux_pte_and_update(pgdir, hva,
|
||||
1, &psize);
|
||||
if (pte_present(pte) && !pte_write(pte))
|
||||
r = hpte_make_readonly(r);
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -160,7 +160,7 @@
|
||||
|
||||
static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
|
||||
{
|
||||
kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
|
||||
}
|
||||
|
||||
static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
|
||||
@ -207,11 +207,11 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
/* put in registers */
|
||||
switch (ls_type) {
|
||||
case FPU_LS_SINGLE:
|
||||
kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
|
||||
kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
|
||||
vcpu->arch.qpr[rs] = *((u32*)tmp);
|
||||
break;
|
||||
case FPU_LS_DOUBLE:
|
||||
vcpu->arch.fpr[rs] = *((u64*)tmp);
|
||||
VCPU_FPR(vcpu, rs) = *((u64*)tmp);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -233,18 +233,18 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
|
||||
switch (ls_type) {
|
||||
case FPU_LS_SINGLE:
|
||||
kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
|
||||
val = *((u32*)tmp);
|
||||
len = sizeof(u32);
|
||||
break;
|
||||
case FPU_LS_SINGLE_LOW:
|
||||
*((u32*)tmp) = vcpu->arch.fpr[rs];
|
||||
val = vcpu->arch.fpr[rs] & 0xffffffff;
|
||||
*((u32*)tmp) = VCPU_FPR(vcpu, rs);
|
||||
val = VCPU_FPR(vcpu, rs) & 0xffffffff;
|
||||
len = sizeof(u32);
|
||||
break;
|
||||
case FPU_LS_DOUBLE:
|
||||
*((u64*)tmp) = vcpu->arch.fpr[rs];
|
||||
val = vcpu->arch.fpr[rs];
|
||||
*((u64*)tmp) = VCPU_FPR(vcpu, rs);
|
||||
val = VCPU_FPR(vcpu, rs);
|
||||
len = sizeof(u64);
|
||||
break;
|
||||
default:
|
||||
@ -301,7 +301,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
emulated = EMULATE_DONE;
|
||||
|
||||
/* put in registers */
|
||||
kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
|
||||
kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
|
||||
vcpu->arch.qpr[rs] = tmp[1];
|
||||
|
||||
dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
|
||||
@ -319,7 +319,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
u32 tmp[2];
|
||||
int len = w ? sizeof(u32) : sizeof(u64);
|
||||
|
||||
kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
|
||||
tmp[1] = vcpu->arch.qpr[rs];
|
||||
|
||||
r = kvmppc_st(vcpu, &addr, len, tmp, true);
|
||||
@ -512,7 +512,6 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
u32 *src2, u32 *src3))
|
||||
{
|
||||
u32 *qpr = vcpu->arch.qpr;
|
||||
u64 *fpr = vcpu->arch.fpr;
|
||||
u32 ps0_out;
|
||||
u32 ps0_in1, ps0_in2, ps0_in3;
|
||||
u32 ps1_in1, ps1_in2, ps1_in3;
|
||||
@ -521,20 +520,20 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
WARN_ON(rc);
|
||||
|
||||
/* PS0 */
|
||||
kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
|
||||
kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
|
||||
kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
|
||||
|
||||
if (scalar & SCALAR_LOW)
|
||||
ps0_in2 = qpr[reg_in2];
|
||||
|
||||
func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
|
||||
func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
|
||||
|
||||
dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
|
||||
ps0_in1, ps0_in2, ps0_in3, ps0_out);
|
||||
|
||||
if (!(scalar & SCALAR_NO_PS0))
|
||||
kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
|
||||
kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
|
||||
|
||||
/* PS1 */
|
||||
ps1_in1 = qpr[reg_in1];
|
||||
@ -545,7 +544,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
ps1_in2 = ps0_in2;
|
||||
|
||||
if (!(scalar & SCALAR_NO_PS1))
|
||||
func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
|
||||
func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
|
||||
|
||||
dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
|
||||
ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
|
||||
@ -561,7 +560,6 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
u32 *src2))
|
||||
{
|
||||
u32 *qpr = vcpu->arch.qpr;
|
||||
u64 *fpr = vcpu->arch.fpr;
|
||||
u32 ps0_out;
|
||||
u32 ps0_in1, ps0_in2;
|
||||
u32 ps1_out;
|
||||
@ -571,20 +569,20 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
WARN_ON(rc);
|
||||
|
||||
/* PS0 */
|
||||
kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
|
||||
|
||||
if (scalar & SCALAR_LOW)
|
||||
ps0_in2 = qpr[reg_in2];
|
||||
else
|
||||
kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
|
||||
|
||||
func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
|
||||
func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
|
||||
|
||||
if (!(scalar & SCALAR_NO_PS0)) {
|
||||
dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
|
||||
ps0_in1, ps0_in2, ps0_out);
|
||||
|
||||
kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
|
||||
kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
|
||||
}
|
||||
|
||||
/* PS1 */
|
||||
@ -594,7 +592,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
if (scalar & SCALAR_HIGH)
|
||||
ps1_in2 = ps0_in2;
|
||||
|
||||
func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
|
||||
func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
|
||||
|
||||
if (!(scalar & SCALAR_NO_PS1)) {
|
||||
qpr[reg_out] = ps1_out;
|
||||
@ -612,7 +610,6 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
u32 *dst, u32 *src1))
|
||||
{
|
||||
u32 *qpr = vcpu->arch.qpr;
|
||||
u64 *fpr = vcpu->arch.fpr;
|
||||
u32 ps0_out, ps0_in;
|
||||
u32 ps1_in;
|
||||
|
||||
@ -620,17 +617,17 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
|
||||
WARN_ON(rc);
|
||||
|
||||
/* PS0 */
|
||||
kvm_cvt_df(&fpr[reg_in], &ps0_in);
|
||||
func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
|
||||
func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
|
||||
|
||||
dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
|
||||
ps0_in, ps0_out);
|
||||
|
||||
kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
|
||||
kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
|
||||
|
||||
/* PS1 */
|
||||
ps1_in = qpr[reg_in];
|
||||
func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);
|
||||
func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
|
||||
|
||||
dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
|
||||
ps1_in, qpr[reg_out]);
|
||||
@ -649,10 +646,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
int ax_rc = inst_get_field(inst, 21, 25);
|
||||
short full_d = inst_get_field(inst, 16, 31);
|
||||
|
||||
u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
|
||||
u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
|
||||
u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
|
||||
u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
|
||||
u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
|
||||
u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
|
||||
u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
|
||||
u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
|
||||
|
||||
bool rcomp = (inst & 1) ? true : false;
|
||||
u32 cr = kvmppc_get_cr(vcpu);
|
||||
@ -674,11 +671,11 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
/* Do we need to clear FE0 / FE1 here? Don't think so. */
|
||||
|
||||
#ifdef DEBUG
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
|
||||
u32 f;
|
||||
kvm_cvt_df(&vcpu->arch.fpr[i], &f);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
|
||||
dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
|
||||
i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
|
||||
i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -764,8 +761,8 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
break;
|
||||
}
|
||||
case OP_4X_PS_NEG:
|
||||
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
|
||||
vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
|
||||
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
|
||||
VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
|
||||
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
|
||||
vcpu->arch.qpr[ax_rd] ^= 0x80000000;
|
||||
break;
|
||||
@ -775,7 +772,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
break;
|
||||
case OP_4X_PS_MR:
|
||||
WARN_ON(rcomp);
|
||||
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
|
||||
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
|
||||
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
|
||||
break;
|
||||
case OP_4X_PS_CMPO1:
|
||||
@ -784,44 +781,44 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
break;
|
||||
case OP_4X_PS_NABS:
|
||||
WARN_ON(rcomp);
|
||||
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
|
||||
vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
|
||||
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
|
||||
VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
|
||||
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
|
||||
vcpu->arch.qpr[ax_rd] |= 0x80000000;
|
||||
break;
|
||||
case OP_4X_PS_ABS:
|
||||
WARN_ON(rcomp);
|
||||
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
|
||||
vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
|
||||
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
|
||||
VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
|
||||
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
|
||||
vcpu->arch.qpr[ax_rd] &= ~0x80000000;
|
||||
break;
|
||||
case OP_4X_PS_MERGE00:
|
||||
WARN_ON(rcomp);
|
||||
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
|
||||
/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
|
||||
kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
|
||||
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
|
||||
/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
|
||||
&vcpu->arch.qpr[ax_rd]);
|
||||
break;
|
||||
case OP_4X_PS_MERGE01:
|
||||
WARN_ON(rcomp);
|
||||
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
|
||||
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
|
||||
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
|
||||
break;
|
||||
case OP_4X_PS_MERGE10:
|
||||
WARN_ON(rcomp);
|
||||
/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
|
||||
/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
|
||||
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
|
||||
&vcpu->arch.fpr[ax_rd]);
|
||||
/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
|
||||
kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
|
||||
&VCPU_FPR(vcpu, ax_rd));
|
||||
/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
|
||||
&vcpu->arch.qpr[ax_rd]);
|
||||
break;
|
||||
case OP_4X_PS_MERGE11:
|
||||
WARN_ON(rcomp);
|
||||
/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
|
||||
/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
|
||||
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
|
||||
&vcpu->arch.fpr[ax_rd]);
|
||||
&VCPU_FPR(vcpu, ax_rd));
|
||||
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
|
||||
break;
|
||||
}
|
||||
@ -856,7 +853,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
case OP_4A_PS_SUM1:
|
||||
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
|
||||
ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
|
||||
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
|
||||
VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
|
||||
break;
|
||||
case OP_4A_PS_SUM0:
|
||||
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
|
||||
@ -1106,45 +1103,45 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
case 59:
|
||||
switch (inst_get_field(inst, 21, 30)) {
|
||||
case OP_59_FADDS:
|
||||
fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FSUBS:
|
||||
fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FDIVS:
|
||||
fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FRES:
|
||||
fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FRSQRTES:
|
||||
fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
}
|
||||
switch (inst_get_field(inst, 26, 30)) {
|
||||
case OP_59_FMULS:
|
||||
fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
|
||||
fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FMSUBS:
|
||||
fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FMADDS:
|
||||
fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FNMSUBS:
|
||||
fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_59_FNMADDS:
|
||||
fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
}
|
||||
@ -1159,12 +1156,12 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
break;
|
||||
case OP_63_MFFS:
|
||||
/* XXX missing CR */
|
||||
*fpr_d = vcpu->arch.fpscr;
|
||||
*fpr_d = vcpu->arch.fp.fpscr;
|
||||
break;
|
||||
case OP_63_MTFSF:
|
||||
/* XXX missing fm bits */
|
||||
/* XXX missing CR */
|
||||
vcpu->arch.fpscr = *fpr_b;
|
||||
vcpu->arch.fp.fpscr = *fpr_b;
|
||||
break;
|
||||
case OP_63_FCMPU:
|
||||
{
|
||||
@ -1172,7 +1169,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
u32 cr0_mask = 0xf0000000;
|
||||
u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
|
||||
|
||||
fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
|
||||
fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
|
||||
cr &= ~(cr0_mask >> cr_shift);
|
||||
cr |= (cr & cr0_mask) >> cr_shift;
|
||||
break;
|
||||
@ -1183,40 +1180,40 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
u32 cr0_mask = 0xf0000000;
|
||||
u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
|
||||
|
||||
fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
|
||||
fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
|
||||
cr &= ~(cr0_mask >> cr_shift);
|
||||
cr |= (cr & cr0_mask) >> cr_shift;
|
||||
break;
|
||||
}
|
||||
case OP_63_FNEG:
|
||||
fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
break;
|
||||
case OP_63_FMR:
|
||||
*fpr_d = *fpr_b;
|
||||
break;
|
||||
case OP_63_FABS:
|
||||
fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
break;
|
||||
case OP_63_FCPSGN:
|
||||
fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
break;
|
||||
case OP_63_FDIV:
|
||||
fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
break;
|
||||
case OP_63_FADD:
|
||||
fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
break;
|
||||
case OP_63_FSUB:
|
||||
fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
|
||||
break;
|
||||
case OP_63_FCTIW:
|
||||
fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
break;
|
||||
case OP_63_FCTIWZ:
|
||||
fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
break;
|
||||
case OP_63_FRSP:
|
||||
fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
kvmppc_sync_qpr(vcpu, ax_rd);
|
||||
break;
|
||||
case OP_63_FRSQRTE:
|
||||
@ -1224,39 +1221,39 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
double one = 1.0f;
|
||||
|
||||
/* fD = sqrt(fB) */
|
||||
fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
|
||||
fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
|
||||
/* fD = 1.0f / fD */
|
||||
fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
|
||||
fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
|
||||
break;
|
||||
}
|
||||
}
|
||||
switch (inst_get_field(inst, 26, 30)) {
|
||||
case OP_63_FMUL:
|
||||
fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
|
||||
fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
|
||||
break;
|
||||
case OP_63_FSEL:
|
||||
fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
break;
|
||||
case OP_63_FMSUB:
|
||||
fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
break;
|
||||
case OP_63_FMADD:
|
||||
fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
break;
|
||||
case OP_63_FNMSUB:
|
||||
fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
break;
|
||||
case OP_63_FNMADD:
|
||||
fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
|
||||
u32 f;
|
||||
kvm_cvt_df(&vcpu->arch.fpr[i], &f);
|
||||
kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
|
||||
dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
|
||||
}
|
||||
#endif
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/miscdevice.h>
|
||||
|
||||
#include "book3s.h"
|
||||
|
||||
@ -566,12 +567,6 @@ static inline int get_fpr_index(int i)
|
||||
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
|
||||
{
|
||||
struct thread_struct *t = ¤t->thread;
|
||||
u64 *vcpu_fpr = vcpu->arch.fpr;
|
||||
#ifdef CONFIG_VSX
|
||||
u64 *vcpu_vsx = vcpu->arch.vsr;
|
||||
#endif
|
||||
u64 *thread_fpr = &t->fp_state.fpr[0][0];
|
||||
int i;
|
||||
|
||||
/*
|
||||
* VSX instructions can access FP and vector registers, so if
|
||||
@ -594,26 +589,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
|
||||
* both the traditional FP registers and the added VSX
|
||||
* registers into thread.fp_state.fpr[].
|
||||
*/
|
||||
if (current->thread.regs->msr & MSR_FP)
|
||||
if (t->regs->msr & MSR_FP)
|
||||
giveup_fpu(current);
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
|
||||
vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
|
||||
|
||||
vcpu->arch.fpscr = t->fp_state.fpscr;
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
if (cpu_has_feature(CPU_FTR_VSX))
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
|
||||
vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
|
||||
#endif
|
||||
t->fp_save_area = NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
if (msr & MSR_VEC) {
|
||||
if (current->thread.regs->msr & MSR_VEC)
|
||||
giveup_altivec(current);
|
||||
memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
|
||||
vcpu->arch.vscr = t->vr_state.vscr;
|
||||
t->vr_save_area = NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -661,12 +646,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
|
||||
ulong msr)
|
||||
{
|
||||
struct thread_struct *t = ¤t->thread;
|
||||
u64 *vcpu_fpr = vcpu->arch.fpr;
|
||||
#ifdef CONFIG_VSX
|
||||
u64 *vcpu_vsx = vcpu->arch.vsr;
|
||||
#endif
|
||||
u64 *thread_fpr = &t->fp_state.fpr[0][0];
|
||||
int i;
|
||||
|
||||
/* When we have paired singles, we emulate in software */
|
||||
if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
|
||||
@ -704,27 +683,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
|
||||
#endif
|
||||
|
||||
if (msr & MSR_FP) {
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
|
||||
thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
|
||||
#ifdef CONFIG_VSX
|
||||
for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
|
||||
thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
|
||||
#endif
|
||||
t->fp_state.fpscr = vcpu->arch.fpscr;
|
||||
t->fpexc_mode = 0;
|
||||
kvmppc_load_up_fpu();
|
||||
enable_kernel_fp();
|
||||
load_fp_state(&vcpu->arch.fp);
|
||||
t->fp_save_area = &vcpu->arch.fp;
|
||||
}
|
||||
|
||||
if (msr & MSR_VEC) {
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
|
||||
t->vr_state.vscr = vcpu->arch.vscr;
|
||||
t->vrsave = -1;
|
||||
kvmppc_load_up_altivec();
|
||||
enable_kernel_altivec();
|
||||
load_vr_state(&vcpu->arch.vr);
|
||||
t->vr_save_area = &vcpu->arch.vr;
|
||||
#endif
|
||||
}
|
||||
|
||||
current->thread.regs->msr |= msr;
|
||||
t->regs->msr |= msr;
|
||||
vcpu->arch.guest_owned_ext |= msr;
|
||||
kvmppc_recalc_shadow_msr(vcpu);
|
||||
|
||||
@ -743,11 +715,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
|
||||
if (!lost_ext)
|
||||
return;
|
||||
|
||||
if (lost_ext & MSR_FP)
|
||||
kvmppc_load_up_fpu();
|
||||
if (lost_ext & MSR_FP) {
|
||||
enable_kernel_fp();
|
||||
load_fp_state(&vcpu->arch.fp);
|
||||
}
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
if (lost_ext & MSR_VEC)
|
||||
kvmppc_load_up_altivec();
|
||||
if (lost_ext & MSR_VEC) {
|
||||
enable_kernel_altivec();
|
||||
load_vr_state(&vcpu->arch.vr);
|
||||
}
|
||||
#endif
|
||||
current->thread.regs->msr |= lost_ext;
|
||||
}
|
||||
@ -873,6 +849,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
/* We're good on these - the host merely wanted to get our attention */
|
||||
case BOOK3S_INTERRUPT_DECREMENTER:
|
||||
case BOOK3S_INTERRUPT_HV_DECREMENTER:
|
||||
case BOOK3S_INTERRUPT_DOORBELL:
|
||||
vcpu->stat.dec_exits++;
|
||||
r = RESUME_GUEST;
|
||||
break;
|
||||
@ -1045,14 +1022,14 @@ program_interrupt:
|
||||
* and if we really did time things so badly, then we just exit
|
||||
* again due to a host external interrupt.
|
||||
*/
|
||||
local_irq_disable();
|
||||
s = kvmppc_prepare_to_enter(vcpu);
|
||||
if (s <= 0) {
|
||||
local_irq_enable();
|
||||
if (s <= 0)
|
||||
r = s;
|
||||
} else {
|
||||
else {
|
||||
/* interrupts now hard-disabled */
|
||||
kvmppc_fix_ee_before_entry();
|
||||
}
|
||||
|
||||
kvmppc_handle_lost_ext(vcpu);
|
||||
}
|
||||
|
||||
@ -1133,19 +1110,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
|
||||
case KVM_REG_PPC_HIOR:
|
||||
*val = get_reg_val(id, to_book3s(vcpu)->hior);
|
||||
break;
|
||||
#ifdef CONFIG_VSX
|
||||
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
|
||||
long int i = id - KVM_REG_PPC_VSR0;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_VSX)) {
|
||||
r = -ENXIO;
|
||||
break;
|
||||
}
|
||||
val->vsxval[0] = vcpu->arch.fpr[i];
|
||||
val->vsxval[1] = vcpu->arch.vsr[i];
|
||||
break;
|
||||
}
|
||||
#endif /* CONFIG_VSX */
|
||||
default:
|
||||
r = -EINVAL;
|
||||
break;
|
||||
@ -1164,19 +1128,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
|
||||
to_book3s(vcpu)->hior = set_reg_val(id, *val);
|
||||
to_book3s(vcpu)->hior_explicit = true;
|
||||
break;
|
||||
#ifdef CONFIG_VSX
|
||||
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
|
||||
long int i = id - KVM_REG_PPC_VSR0;
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_VSX)) {
|
||||
r = -ENXIO;
|
||||
break;
|
||||
}
|
||||
vcpu->arch.fpr[i] = val->vsxval[0];
|
||||
vcpu->arch.vsr[i] = val->vsxval[1];
|
||||
break;
|
||||
}
|
||||
#endif /* CONFIG_VSX */
|
||||
default:
|
||||
r = -EINVAL;
|
||||
break;
|
||||
@ -1274,17 +1225,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
|
||||
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
struct thread_fp_state fp;
|
||||
int fpexc_mode;
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
struct thread_vr_state vr;
|
||||
unsigned long uninitialized_var(vrsave);
|
||||
int used_vr;
|
||||
#endif
|
||||
#ifdef CONFIG_VSX
|
||||
int used_vsr;
|
||||
#endif
|
||||
ulong ext_msr;
|
||||
|
||||
/* Check if we can run the vcpu at all */
|
||||
if (!vcpu->arch.sane) {
|
||||
@ -1299,40 +1242,27 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
* really did time things so badly, then we just exit again due to
|
||||
* a host external interrupt.
|
||||
*/
|
||||
local_irq_disable();
|
||||
ret = kvmppc_prepare_to_enter(vcpu);
|
||||
if (ret <= 0) {
|
||||
local_irq_enable();
|
||||
if (ret <= 0)
|
||||
goto out;
|
||||
}
|
||||
/* interrupts now hard-disabled */
|
||||
|
||||
/* Save FPU state in stack */
|
||||
/* Save FPU state in thread_struct */
|
||||
if (current->thread.regs->msr & MSR_FP)
|
||||
giveup_fpu(current);
|
||||
fp = current->thread.fp_state;
|
||||
fpexc_mode = current->thread.fpexc_mode;
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
/* Save Altivec state in stack */
|
||||
used_vr = current->thread.used_vr;
|
||||
if (used_vr) {
|
||||
if (current->thread.regs->msr & MSR_VEC)
|
||||
giveup_altivec(current);
|
||||
vr = current->thread.vr_state;
|
||||
vrsave = current->thread.vrsave;
|
||||
}
|
||||
/* Save Altivec state in thread_struct */
|
||||
if (current->thread.regs->msr & MSR_VEC)
|
||||
giveup_altivec(current);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
/* Save VSX state in stack */
|
||||
used_vsr = current->thread.used_vsr;
|
||||
if (used_vsr && (current->thread.regs->msr & MSR_VSX))
|
||||
/* Save VSX state in thread_struct */
|
||||
if (current->thread.regs->msr & MSR_VSX)
|
||||
__giveup_vsx(current);
|
||||
#endif
|
||||
|
||||
/* Remember the MSR with disabled extensions */
|
||||
ext_msr = current->thread.regs->msr;
|
||||
|
||||
/* Preload FPU if it's enabled */
|
||||
if (vcpu->arch.shared->msr & MSR_FP)
|
||||
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
|
||||
@ -1347,25 +1277,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
/* Make sure we save the guest FPU/Altivec/VSX state */
|
||||
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
|
||||
|
||||
current->thread.regs->msr = ext_msr;
|
||||
|
||||
/* Restore FPU/VSX state from stack */
|
||||
current->thread.fp_state = fp;
|
||||
current->thread.fpexc_mode = fpexc_mode;
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
/* Restore Altivec state from stack */
|
||||
if (used_vr && current->thread.used_vr) {
|
||||
current->thread.vr_state = vr;
|
||||
current->thread.vrsave = vrsave;
|
||||
}
|
||||
current->thread.used_vr = used_vr;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
current->thread.used_vsr = used_vsr;
|
||||
#endif
|
||||
|
||||
out:
|
||||
vcpu->mode = OUTSIDE_GUEST_MODE;
|
||||
return ret;
|
||||
@ -1606,4 +1517,6 @@ module_init(kvmppc_book3s_init_pr);
|
||||
module_exit(kvmppc_book3s_exit_pr);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_MISCDEV(KVM_MINOR);
|
||||
MODULE_ALIAS("devname:kvm");
|
||||
#endif
|
||||
|
@ -162,51 +162,4 @@ _GLOBAL(kvmppc_entry_trampoline)
|
||||
mtsrr1 r6
|
||||
RFI
|
||||
|
||||
#if defined(CONFIG_PPC_BOOK3S_32)
|
||||
#define STACK_LR INT_FRAME_SIZE+4
|
||||
|
||||
/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
|
||||
#define MSR_EXT_START \
|
||||
PPC_STL r20, _NIP(r1); \
|
||||
mfmsr r20; \
|
||||
LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
|
||||
andc r3,r20,r3; /* Disable DR,EE */ \
|
||||
mtmsr r3; \
|
||||
sync
|
||||
|
||||
#define MSR_EXT_END \
|
||||
mtmsr r20; /* Enable DR,EE */ \
|
||||
sync; \
|
||||
PPC_LL r20, _NIP(r1)
|
||||
|
||||
#elif defined(CONFIG_PPC_BOOK3S_64)
|
||||
#define STACK_LR _LINK
|
||||
#define MSR_EXT_START
|
||||
#define MSR_EXT_END
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Activate current's external feature (FPU/Altivec/VSX)
|
||||
*/
|
||||
#define define_load_up(what) \
|
||||
\
|
||||
_GLOBAL(kvmppc_load_up_ ## what); \
|
||||
PPC_STLU r1, -INT_FRAME_SIZE(r1); \
|
||||
mflr r3; \
|
||||
PPC_STL r3, STACK_LR(r1); \
|
||||
MSR_EXT_START; \
|
||||
\
|
||||
bl FUNC(load_up_ ## what); \
|
||||
\
|
||||
MSR_EXT_END; \
|
||||
PPC_LL r3, STACK_LR(r1); \
|
||||
mtlr r3; \
|
||||
addi r1, r1, INT_FRAME_SIZE; \
|
||||
blr
|
||||
|
||||
define_load_up(fpu)
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
define_load_up(altivec)
|
||||
#endif
|
||||
|
||||
#include "book3s_segment.S"
|
||||
|
@ -361,6 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
|
||||
beqa BOOK3S_INTERRUPT_DECREMENTER
|
||||
cmpwi r12, BOOK3S_INTERRUPT_PERFMON
|
||||
beqa BOOK3S_INTERRUPT_PERFMON
|
||||
cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
|
||||
beqa BOOK3S_INTERRUPT_DOORBELL
|
||||
|
||||
RFI
|
||||
kvmppc_handler_trampoline_exit_end:
|
||||
|
@ -1246,8 +1246,10 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
|
||||
kvm->arch.xics = xics;
|
||||
mutex_unlock(&kvm->lock);
|
||||
|
||||
if (ret)
|
||||
if (ret) {
|
||||
kfree(xics);
|
||||
return ret;
|
||||
}
|
||||
|
||||
xics_debugfs_init(xics);
|
||||
|
||||
|
@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
|
||||
local_irq_enable();
|
||||
kvm_vcpu_block(vcpu);
|
||||
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
|
||||
local_irq_disable();
|
||||
hard_irq_disable();
|
||||
|
||||
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
|
||||
r = 1;
|
||||
@ -682,34 +682,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret, s;
|
||||
struct debug_reg debug;
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
struct thread_fp_state fp;
|
||||
int fpexc_mode;
|
||||
#endif
|
||||
|
||||
if (!vcpu->arch.sane) {
|
||||
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
s = kvmppc_prepare_to_enter(vcpu);
|
||||
if (s <= 0) {
|
||||
local_irq_enable();
|
||||
ret = s;
|
||||
goto out;
|
||||
}
|
||||
/* interrupts now hard-disabled */
|
||||
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
/* Save userspace FPU state in stack */
|
||||
enable_kernel_fp();
|
||||
fp = current->thread.fp_state;
|
||||
fpexc_mode = current->thread.fpexc_mode;
|
||||
|
||||
/* Restore guest FPU state to thread */
|
||||
memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
|
||||
sizeof(vcpu->arch.fpr));
|
||||
current->thread.fp_state.fpscr = vcpu->arch.fpscr;
|
||||
|
||||
/*
|
||||
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
|
||||
@ -728,6 +716,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
debug = current->thread.debug;
|
||||
current->thread.debug = vcpu->arch.shadow_dbg_reg;
|
||||
|
||||
vcpu->arch.pgdir = current->mm->pgd;
|
||||
kvmppc_fix_ee_before_entry();
|
||||
|
||||
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
|
||||
@ -743,15 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
kvmppc_save_guest_fp(vcpu);
|
||||
|
||||
vcpu->fpu_active = 0;
|
||||
|
||||
/* Save guest FPU state from thread */
|
||||
memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
|
||||
sizeof(vcpu->arch.fpr));
|
||||
vcpu->arch.fpscr = current->thread.fp_state.fpscr;
|
||||
|
||||
/* Restore userspace FPU state from stack */
|
||||
current->thread.fp_state = fp;
|
||||
current->thread.fpexc_mode = fpexc_mode;
|
||||
#endif
|
||||
|
||||
out:
|
||||
@ -898,17 +878,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
int s;
|
||||
int idx;
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
WARN_ON(local_paca->irq_happened != 0);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We enter with interrupts disabled in hardware, but
|
||||
* we need to call hard_irq_disable anyway to ensure that
|
||||
* the software state is kept in sync.
|
||||
*/
|
||||
hard_irq_disable();
|
||||
|
||||
/* update before a new last_exit_type is rewritten */
|
||||
kvmppc_update_timing_stats(vcpu);
|
||||
|
||||
@ -1217,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
* aren't already exiting to userspace for some other reason.
|
||||
*/
|
||||
if (!(r & RESUME_HOST)) {
|
||||
local_irq_disable();
|
||||
s = kvmppc_prepare_to_enter(vcpu);
|
||||
if (s <= 0) {
|
||||
local_irq_enable();
|
||||
if (s <= 0)
|
||||
r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
|
||||
} else {
|
||||
else {
|
||||
/* interrupts now hard-disabled */
|
||||
kvmppc_fix_ee_before_entry();
|
||||
}
|
||||
}
|
||||
|
@ -136,7 +136,9 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
|
||||
load_up_fpu();
|
||||
enable_kernel_fp();
|
||||
load_fp_state(&vcpu->arch.fp);
|
||||
current->thread.fp_save_area = &vcpu->arch.fp;
|
||||
current->thread.regs->msr |= MSR_FP;
|
||||
}
|
||||
#endif
|
||||
@ -151,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
|
||||
giveup_fpu(current);
|
||||
current->thread.fp_save_area = NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -33,6 +33,8 @@
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#include <asm/exception-64e.h>
|
||||
#include <asm/hw_irq.h>
|
||||
#include <asm/irqflags.h>
|
||||
#else
|
||||
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
|
||||
#endif
|
||||
@ -465,6 +467,15 @@ _GLOBAL(kvmppc_resume_host)
|
||||
mtspr SPRN_EPCR, r3
|
||||
isync
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
/*
|
||||
* We enter with interrupts disabled in hardware, but
|
||||
* we need to call RECONCILE_IRQ_STATE to ensure
|
||||
* that the software state is kept in sync.
|
||||
*/
|
||||
RECONCILE_IRQ_STATE(r3,r5)
|
||||
#endif
|
||||
|
||||
/* Switch to kernel stack and jump to handler. */
|
||||
PPC_LL r3, HOST_RUN(r1)
|
||||
mr r5, r14 /* intno */
|
||||
|
@ -16,6 +16,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/miscdevice.h>
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
@ -573,3 +575,5 @@ static void __exit kvmppc_e500_exit(void)
|
||||
|
||||
module_init(kvmppc_e500_init);
|
||||
module_exit(kvmppc_e500_exit);
|
||||
MODULE_ALIAS_MISCDEV(KVM_MINOR);
|
||||
MODULE_ALIAS("devname:kvm");
|
||||
|
@ -31,11 +31,13 @@ enum vcpu_ftr {
|
||||
#define E500_TLB_NUM 2
|
||||
|
||||
/* entry is mapped somewhere in host TLB */
|
||||
#define E500_TLB_VALID (1 << 0)
|
||||
#define E500_TLB_VALID (1 << 31)
|
||||
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
|
||||
#define E500_TLB_BITMAP (1 << 1)
|
||||
#define E500_TLB_BITMAP (1 << 30)
|
||||
/* TLB1 entry is mapped by host TLB0 */
|
||||
#define E500_TLB_TLB0 (1 << 2)
|
||||
#define E500_TLB_TLB0 (1 << 29)
|
||||
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
|
||||
#define E500_TLB_MAS2_ATTR (0x7f)
|
||||
|
||||
struct tlbe_ref {
|
||||
pfn_t pfn; /* valid only for TLB0, except briefly */
|
||||
|
@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
}
|
||||
|
||||
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
|
||||
unsigned int eaddr, int as)
|
||||
gva_t eaddr, int as)
|
||||
{
|
||||
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
|
||||
unsigned int victim, tsized;
|
||||
|
@ -65,15 +65,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
|
||||
return mas3;
|
||||
}
|
||||
|
||||
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
|
||||
#else
|
||||
return mas2 & MAS2_ATTRIB_MASK;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* writing shadow tlb entry to host TLB
|
||||
*/
|
||||
@ -231,15 +222,15 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
|
||||
ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
|
||||
}
|
||||
|
||||
/* Already invalidated in between */
|
||||
if (!(ref->flags & E500_TLB_VALID))
|
||||
return;
|
||||
|
||||
/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
|
||||
kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
|
||||
/*
|
||||
* If TLB entry is still valid then it's a TLB0 entry, and thus
|
||||
* backed by at most one host tlbe per shadow pid
|
||||
*/
|
||||
if (ref->flags & E500_TLB_VALID)
|
||||
kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
|
||||
|
||||
/* Mark the TLB as not backed by the host anymore */
|
||||
ref->flags &= ~E500_TLB_VALID;
|
||||
ref->flags = 0;
|
||||
}
|
||||
|
||||
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
|
||||
@ -249,10 +240,13 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
|
||||
|
||||
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
|
||||
struct kvm_book3e_206_tlb_entry *gtlbe,
|
||||
pfn_t pfn)
|
||||
pfn_t pfn, unsigned int wimg)
|
||||
{
|
||||
ref->pfn = pfn;
|
||||
ref->flags |= E500_TLB_VALID;
|
||||
ref->flags = E500_TLB_VALID;
|
||||
|
||||
/* Use guest supplied MAS2_G and MAS2_E */
|
||||
ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
|
||||
|
||||
/* Mark the page accessed */
|
||||
kvm_set_pfn_accessed(pfn);
|
||||
@ -316,8 +310,7 @@ static void kvmppc_e500_setup_stlbe(
|
||||
|
||||
/* Force IPROT=0 for all guest mappings. */
|
||||
stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
|
||||
stlbe->mas2 = (gvaddr & MAS2_EPN) |
|
||||
e500_shadow_mas2_attrib(gtlbe->mas2, pr);
|
||||
stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
|
||||
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
|
||||
e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
|
||||
|
||||
@ -339,6 +332,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
int ret = 0;
|
||||
unsigned long mmu_seq;
|
||||
struct kvm *kvm = vcpu_e500->vcpu.kvm;
|
||||
unsigned long tsize_pages = 0;
|
||||
pte_t *ptep;
|
||||
unsigned int wimg = 0;
|
||||
pgd_t *pgdir;
|
||||
|
||||
/* used to check for invalidations in progress */
|
||||
mmu_seq = kvm->mmu_notifier_seq;
|
||||
@ -405,7 +402,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
*/
|
||||
|
||||
for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
|
||||
unsigned long gfn_start, gfn_end, tsize_pages;
|
||||
unsigned long gfn_start, gfn_end;
|
||||
tsize_pages = 1 << (tsize - 2);
|
||||
|
||||
gfn_start = gfn & ~(tsize_pages - 1);
|
||||
@ -447,11 +444,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
}
|
||||
|
||||
if (likely(!pfnmap)) {
|
||||
unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
|
||||
tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
|
||||
pfn = gfn_to_pfn_memslot(slot, gfn);
|
||||
if (is_error_noslot_pfn(pfn)) {
|
||||
printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
|
||||
(long)gfn);
|
||||
if (printk_ratelimit())
|
||||
pr_err("%s: real page not found for gfn %lx\n",
|
||||
__func__, (long)gfn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -466,7 +464,18 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
goto out;
|
||||
}
|
||||
|
||||
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
|
||||
|
||||
pgdir = vcpu_e500->vcpu.arch.pgdir;
|
||||
ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
|
||||
if (pte_present(*ptep))
|
||||
wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
|
||||
else {
|
||||
if (printk_ratelimit())
|
||||
pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
|
||||
__func__, (long)gfn, pfn);
|
||||
return -EINVAL;
|
||||
}
|
||||
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
|
||||
|
||||
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
|
||||
ref, gvaddr, stlbe);
|
||||
|
@ -16,6 +16,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
@ -391,3 +393,5 @@ static void __exit kvmppc_e500mc_exit(void)
|
||||
|
||||
module_init(kvmppc_e500mc_init);
|
||||
module_exit(kvmppc_e500mc_exit);
|
||||
MODULE_ALIAS_MISCDEV(KVM_MINOR);
|
||||
MODULE_ALIAS("devname:kvm");
|
||||
|
@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
|
||||
* lmw
|
||||
* stmw
|
||||
*
|
||||
* XXX is_bigendian should depend on MMU mapping or MSR[LE]
|
||||
*/
|
||||
/* XXX Should probably auto-generate instruction decoding for a particular core
|
||||
* from opcode tables in the future. */
|
||||
|
@ -1635,6 +1635,7 @@ static void mpic_destroy(struct kvm_device *dev)
|
||||
|
||||
dev->kvm->arch.mpic = NULL;
|
||||
kfree(opp);
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
static int mpic_set_default_irq_routing(struct openpic *opp)
|
||||
|
@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int r = 1;
|
||||
int r;
|
||||
|
||||
WARN_ON(irqs_disabled());
|
||||
hard_irq_disable();
|
||||
|
||||
WARN_ON_ONCE(!irqs_disabled());
|
||||
while (true) {
|
||||
if (need_resched()) {
|
||||
local_irq_enable();
|
||||
cond_resched();
|
||||
local_irq_disable();
|
||||
hard_irq_disable();
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
|
||||
local_irq_enable();
|
||||
trace_kvm_check_requests(vcpu);
|
||||
r = kvmppc_core_check_requests(vcpu);
|
||||
local_irq_disable();
|
||||
hard_irq_disable();
|
||||
if (r > 0)
|
||||
continue;
|
||||
break;
|
||||
@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
|
||||
continue;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
/* lazy EE magic */
|
||||
hard_irq_disable();
|
||||
if (lazy_irq_pending()) {
|
||||
/* Got an interrupt in between, try again */
|
||||
local_irq_enable();
|
||||
local_irq_disable();
|
||||
kvm_guest_exit();
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
kvm_guest_enter();
|
||||
break;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* return to host */
|
||||
local_irq_enable();
|
||||
return r;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
|
||||
@ -656,14 +648,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
|
||||
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
|
||||
break;
|
||||
case KVM_MMIO_REG_FPR:
|
||||
vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
|
||||
VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
|
||||
break;
|
||||
#ifdef CONFIG_PPC_BOOK3S
|
||||
case KVM_MMIO_REG_QPR:
|
||||
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
|
||||
break;
|
||||
case KVM_MMIO_REG_FQPR:
|
||||
vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
|
||||
VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
|
||||
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
|
||||
break;
|
||||
#endif
|
||||
@ -673,9 +665,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
|
||||
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
unsigned int rt, unsigned int bytes, int is_bigendian)
|
||||
unsigned int rt, unsigned int bytes,
|
||||
int is_default_endian)
|
||||
{
|
||||
int idx, ret;
|
||||
int is_bigendian;
|
||||
|
||||
if (kvmppc_need_byteswap(vcpu)) {
|
||||
/* Default endianness is "little endian". */
|
||||
is_bigendian = !is_default_endian;
|
||||
} else {
|
||||
/* Default endianness is "big endian". */
|
||||
is_bigendian = is_default_endian;
|
||||
}
|
||||
|
||||
if (bytes > sizeof(run->mmio.data)) {
|
||||
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
|
||||
@ -711,21 +713,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load);
|
||||
|
||||
/* Same as above, but sign extends */
|
||||
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
unsigned int rt, unsigned int bytes, int is_bigendian)
|
||||
unsigned int rt, unsigned int bytes,
|
||||
int is_default_endian)
|
||||
{
|
||||
int r;
|
||||
|
||||
vcpu->arch.mmio_sign_extend = 1;
|
||||
r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
|
||||
r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
u64 val, unsigned int bytes, int is_bigendian)
|
||||
u64 val, unsigned int bytes, int is_default_endian)
|
||||
{
|
||||
void *data = run->mmio.data;
|
||||
int idx, ret;
|
||||
int is_bigendian;
|
||||
|
||||
if (kvmppc_need_byteswap(vcpu)) {
|
||||
/* Default endianness is "little endian". */
|
||||
is_bigendian = !is_default_endian;
|
||||
} else {
|
||||
/* Default endianness is "big endian". */
|
||||
is_bigendian = is_default_endian;
|
||||
}
|
||||
|
||||
if (bytes > sizeof(run->mmio.data)) {
|
||||
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
|
||||
|
Loading…
Reference in New Issue
Block a user