Mirror of https://github.com/torvalds/linux.git
797a5af8fc

Commit 7092360399 ("KVM: PPC: Reimplement non-SIMD LOAD/STORE instruction mmio
emulation with analyse_instr() input") and commit 2b33cb585f ("KVM: PPC:
Reimplement LOAD_FP/STORE_FP instruction mmio emulation with analyse_instr()
input") made kvmppc_emulate_loadstore() use the results from analyse_instr()
for instruction emulation. In particular, the effective address from
analyse_instr() is used for UPDATE type instructions, and the fact that op.val
is already endian corrected is used in the STORE case.

However, these changes have some negative implications for the nestedv2 case.
For analyse_instr() to determine the correct effective address, the GPRs must
be loaded from the L0. This is not needed, as vcpu->arch.vaddr_accessed is
already set. Change back to using vcpu->arch.vaddr_accessed.

In the STORE case, use the kvmppc_get_gpr() value instead of op.val.
kvmppc_get_gpr() will reload from the L0 if needed in the nestedv2 case. This
means that whether a byte reversal is needed must now be passed to
kvmppc_handle_store(), as in the kvmppc_handle_load() case. It also means the
call to kvmhv_nestedv2_reload_ptregs() can be avoided, as there is no concern
about op.val being stale.

Drop the call to kvmhv_nestedv2_mark_dirty_ptregs(), since without the call to
kvmhv_nestedv2_reload_ptregs(), stale state could be marked as valid. This is
fine, as the required dirty marking is already handled for the UPDATE case by
the call to kvmppc_set_gpr(). For LOADs, it is handled in
kvmppc_complete_mmio_load(), which is called either directly in
__kvmppc_handle_load() if the load can be handled in KVM, or on the next
kvm_arch_vcpu_ioctl_run() if an exit was required.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20231201132618.555031-12-vaibhav@linux.ibm.com
368 lines
9.2 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	ppc_inst_t inst;
	enum emulation_result emulated = EMULATE_FAIL;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		vcpu->mmio_is_write = OP_IS_STORE(type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE: {
			int instr_byte_swap = op.type & BYTEREV;

			emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
						       size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					kvmppc_get_fpr(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (emulated != EMULATE_FAIL)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

	return emulated;
}