Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Mostly stable material, a lot of ARM fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (22 commits)
  sched: access local runqueue directly in single_task_running
  arm/arm64: KVM: Remove 'config KVM_ARM_MAX_VCPUS'
  arm64: KVM: Remove all traces of the ThumbEE registers
  arm: KVM: Disable virtual timer even if the guest is not using it
  arm64: KVM: Disable virtual timer even if the guest is not using it
  arm/arm64: KVM: vgic: Check for !irqchip_in_kernel() when mapping resources
  KVM: s390: Replace incorrect atomic_or with atomic_andnot
  arm: KVM: Fix incorrect device to IPA mapping
  arm64: KVM: Fix user access for debug registers
  KVM: vmx: fix VPID is 0000H in non-root operation
  KVM: add halt_attempted_poll to VCPU stats
  kvm: fix zero length mmio searching
  kvm: fix double free for fast mmio eventfd
  kvm: factor out core eventfd assign/deassign logic
  kvm: don't try to register to KVM_FAST_MMIO_BUS for non mmio eventfd
  KVM: make the declaration of functions within 80 characters
  KVM: arm64: add workaround for Cortex-A57 erratum #852523
  KVM: fix polling for guest halt continued even if disable it
  arm/arm64: KVM: Fix PSCI affinity info return value for non valid cores
  arm64: KVM: set {v,}TCR_EL2 RES1 bits
  ...

commit 3ae839454e
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -29,12 +29,6 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -44,6 +38,8 @@
 
 #include <kvm/arm_vgic.h>
 
+#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -148,6 +144,7 @@ struct kvm_vm_stat {
 
 struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 };
 
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -45,15 +45,4 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.
 
-config KVM_ARM_MAX_VCPUS
-	int "Number maximum supported virtual CPUs per VM"
-	depends on KVM_ARM_HOST
-	default 4
-	help
-	  Static number of max supported virtual CPUs per VM.
-
-	  If you choose a high number, the vcpu structures will be quite
-	  large, so only choose a reasonable number that you expect to
-	  actually use.
-
 endif # VIRTUALIZATION
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -446,7 +446,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	 * Map the VGIC hardware resources before running a vcpu the first
 	 * time on this VM.
 	 */
-	if (unlikely(!vgic_ready(kvm))) {
+	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
 		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -515,8 +515,7 @@ ARM_BE8(rev	r6, r6  )
 
 	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
-	bic	r2, #1			@ Clear ENABLE
-	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+
 	isb
 
 	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
@@ -529,6 +528,9 @@ ARM_BE8(rev	r6, r6  )
 	mcrr	p15, 4, r2, r2, c14	@ CNTVOFF
 
 1:
+	mov	r2, #0			@ Clear ENABLE
+	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+
 	@ Allow physical timer/counter access for the host
 	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
 	orr	r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		if (vma->vm_flags & VM_PFNMAP) {
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
-			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
-					 vm_start - vma->vm_start;
+			phys_addr_t pa;
+
+			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
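The root cause of the hunk above is integer-width truncation: with LPAE enabled, phys_addr_t is 64-bit while vma->vm_pgoff is a 32-bit unsigned long, so the old code performed the shift in 32 bits and lost the high bits of any device address above 4 GiB. A minimal standalone sketch of the difference (plain userspace C with a made-up page offset, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vm_pgoff = 0x00500000;	/* a device page that lives above 4 GiB */

	/* shift performed in 32 bits, then widened: high bits already lost */
	uint64_t truncated = (uint64_t)(vm_pgoff << 12);
	/* widened first, then shifted, as the fix does: correct address */
	uint64_t widened   = (uint64_t)vm_pgoff << 12;

	printf("truncated=%#llx widened=%#llx\n",
	       (unsigned long long)truncated,	/* 0x0 */
	       (unsigned long long)widened);	/* 0x500000000 */
	return 0;
}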
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -126,7 +126,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 
 static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 {
-	int i;
+	int i, matching_cpus = 0;
 	unsigned long mpidr;
 	unsigned long target_affinity;
 	unsigned long target_affinity_mask;
@@ -151,12 +151,16 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	 */
 	kvm_for_each_vcpu(i, tmp, kvm) {
 		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
-		if (((mpidr & target_affinity_mask) == target_affinity) &&
-		    !tmp->arch.pause) {
-			return PSCI_0_2_AFFINITY_LEVEL_ON;
+		if ((mpidr & target_affinity_mask) == target_affinity) {
+			matching_cpus++;
+			if (!tmp->arch.pause)
+				return PSCI_0_2_AFFINITY_LEVEL_ON;
 		}
 	}
 
+	if (!matching_cpus)
+		return PSCI_RET_INVALID_PARAMS;
+
 	return PSCI_0_2_AFFINITY_LEVEL_OFF;
 }
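The fix counts the cores that match the requested affinity, so an AFFINITY_INFO query for an affinity that maps to no vCPU now fails with PSCI_RET_INVALID_PARAMS instead of reporting nonexistent cores as OFF. A standalone sketch of the new control flow, with stand-in types replacing the kernel's vcpu structures:

#include <stdbool.h>

#define AFF_LEVEL_ON		0
#define AFF_LEVEL_OFF		1
#define RET_INVALID_PARAMS	(-2)

struct vcpu { unsigned long mpidr; bool paused; };

static long affinity_info(const struct vcpu *vcpus, int n,
			  unsigned long target, unsigned long mask)
{
	int i, matching_cpus = 0;

	for (i = 0; i < n; i++) {
		if ((vcpus[i].mpidr & mask) == target) {
			matching_cpus++;
			if (!vcpus[i].paused)
				return AFF_LEVEL_ON;	/* at least one core is up */
		}
	}

	if (!matching_cpus)
		return RET_INVALID_PARAMS;	/* no such core: the new error path */

	return AFF_LEVEL_OFF;
}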
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -95,6 +95,7 @@
 			 SCTLR_EL2_SA | SCTLR_EL2_I)
 
 /* TCR_EL2 Registers bits */
+#define TCR_EL2_RES1	((1 << 31) | (1 << 23))
 #define TCR_EL2_TBI	(1 << 20)
 #define TCR_EL2_PS	(7 << 16)
 #define TCR_EL2_PS_40B	(2 << 16)
@@ -106,9 +107,10 @@
 #define TCR_EL2_MASK	(TCR_EL2_TG0 | TCR_EL2_SH0 | \
 			 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
 
-#define TCR_EL2_FLAGS	(TCR_EL2_PS_40B)
+#define TCR_EL2_FLAGS	(TCR_EL2_RES1 | TCR_EL2_PS_40B)
 
 /* VTCR_EL2 Registers bits */
+#define VTCR_EL2_RES1		(1 << 31)
 #define VTCR_EL2_PS_MASK	(7 << 16)
 #define VTCR_EL2_TG0_MASK	(1 << 14)
 #define VTCR_EL2_TG0_4K		(0 << 14)
@@ -147,7 +149,8 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+				 VTCR_EL2_RES1)
 #define VTTBR_X		(38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
@@ -158,7 +161,8 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+				 VTCR_EL2_RES1)
 #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
 #endif
 
@@ -168,7 +172,6 @@
 #define VTTBR_VMID_MASK  (UL(0xFF) << VTTBR_VMID_SHIFT)
 
 /* Hyp System Trap Register */
-#define HSTR_EL2_TTEE	(1 << 16)
 #define HSTR_EL2_T(x)	(1 << x)
 
 /* Hyp Coproccessor Trap Register Shifts */
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -53,9 +53,7 @@
 #define IFSR32_EL2	25	/* Instruction Fault Status Register */
 #define FPEXC32_EL2	26	/* Floating-Point Exception Control Register */
 #define DBGVCR32_EL2	27	/* Debug Vector Catch Register */
-#define TEECR32_EL1	28	/* ThumbEE Configuration Register */
-#define TEEHBR32_EL1	29	/* ThumbEE Handler Base Register */
-#define NR_SYS_REGS	30
+#define NR_SYS_REGS	28
 
 /* 32bit mapping */
 #define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,12 +30,6 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -43,6 +37,8 @@
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
 
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+
 #define KVM_VCPU_MAX_FEATURES 3
 
 int __attribute_const__ kvm_target_cpu(void);
@@ -195,6 +191,7 @@ struct kvm_vm_stat {
 
 struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 };
 
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -41,15 +41,4 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.
 
-config KVM_ARM_MAX_VCPUS
-	int "Number maximum supported virtual CPUs per VM"
-	depends on KVM_ARM_HOST
-	default 4
-	help
-	  Static number of max supported virtual CPUs per VM.
-
-	  If you choose a high number, the vcpu structures will be quite
-	  large, so only choose a reasonable number that you expect to
-	  actually use.
-
 endif # VIRTUALIZATION
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -433,20 +433,13 @@
 	mrs	x5, ifsr32_el2
 	stp	x4, x5, [x3]
 
-	skip_fpsimd_state x8, 3f
+	skip_fpsimd_state x8, 2f
 	mrs	x6, fpexc32_el2
 	str	x6, [x3, #16]
-3:
-	skip_debug_state x8, 2f
+2:
+	skip_debug_state x8, 1f
 	mrs	x7, dbgvcr32_el2
 	str	x7, [x3, #24]
-2:
-	skip_tee_state x8, 1f
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
-	mrs	x4, teecr32_el1
-	mrs	x5, teehbr32_el1
-	stp	x4, x5, [x3]
 1:
 .endm
 
@@ -466,16 +459,9 @@
 	msr	dacr32_el2, x4
 	msr	ifsr32_el2, x5
 
-	skip_debug_state x8, 2f
+	skip_debug_state x8, 1f
 	ldr	x7, [x3, #24]
 	msr	dbgvcr32_el2, x7
-2:
-	skip_tee_state x8, 1f
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
-	ldp	x4, x5, [x3]
-	msr	teecr32_el1, x4
-	msr	teehbr32_el1, x5
 1:
 .endm
 
@@ -570,8 +556,6 @@ alternative_endif
 	mrs	x3, cntv_ctl_el0
 	and	x3, x3, #3
 	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
-	bic	x3, x3, #1		// Clear Enable
-	msr	cntv_ctl_el0, x3
 
 	isb
 
@@ -579,6 +563,9 @@ alternative_endif
 	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]
 
 1:
+	// Disable the virtual timer
+	msr	cntv_ctl_el0, xzr
+
 	// Allow physical timer/counter access for the host
 	mrs	x2, cnthctl_el2
 	orr	x2, x2, #3
@@ -753,6 +740,9 @@ ENTRY(__kvm_vcpu_run)
 	// Guest context
 	add	x2, x0, #VCPU_CONTEXT
 
+	// We must restore the 32-bit state before the sysregs, thanks
+	// to Cortex-A57 erratum #852523.
+	restore_guest_32bit_state
 	bl	__restore_sysregs
 
 	skip_debug_state x3, 1f
@@ -760,7 +750,6 @@ ENTRY(__kvm_vcpu_run)
 	kern_hyp_va x3
 	bl	__restore_debug
 1:
-	restore_guest_32bit_state
 	restore_guest_regs
 
 	// That's it, no more messing around.
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -272,7 +272,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -314,7 +314,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 
 	return 0;
@@ -358,7 +358,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -400,7 +400,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 {
 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 
-	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -539,13 +539,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
 	  trap_dbgauthstatus_el1 },
 
-	/* TEECR32_EL1 */
-	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
-	  NULL, reset_val, TEECR32_EL1, 0 },
-	/* TEEHBR32_EL1 */
-	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
-	  NULL, reset_val, TEEHBR32_EL1, 0 },
-
 	/* MDCCSR_EL1 */
 	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
 	  trap_raz_wi },
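The four copy_from_user() hunks above are the same argument-order fix: like memcpy(), copy_from_user() takes the destination first, and a SET_ONE_REG write must copy from the user buffer into the kernel-side debug register slot, not the other way around. A trivial userspace analogue of the corrected direction, with stand-in buffers:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t user_buf = 0x1234;	/* stand-in for the __user source */
	uint64_t dbg_reg = 0;		/* stand-in for dbg_bvr[rd->reg] */

	/* correct direction: destination (kernel side) first, as in the fix */
	memcpy(&dbg_reg, &user_buf, sizeof(dbg_reg));
	assert(dbg_reg == 0x1234);
	return 0;
}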
|
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -128,6 +128,7 @@ struct kvm_vcpu_stat {
 	u32 msa_disabled_exits;
 	u32 flush_dcache_exits;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 };
 
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -55,6 +55,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
 	{NULL}
 };
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -108,6 +108,7 @@ struct kvm_vcpu_stat {
 	u32 dec_exits;
 	u32 ext_intr_exits;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 	u32 dbell_exits;
 	u32 gdbell_exits;
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -53,6 +53,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
 	{ "queue_intr", VCPU_STAT(queue_intr) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "pf_storage", VCPU_STAT(pf_storage) },
 	{ "sp_storage", VCPU_STAT(sp_storage) },
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "dec", VCPU_STAT(dec_exits) },
 	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "doorbell", VCPU_STAT(dbell_exits) },
 	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -210,6 +210,7 @@ struct kvm_vcpu_stat {
 	u32 exit_validity;
 	u32 exit_instruction;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 	u32 instruction_lctl;
 	u32 instruction_lctlg;
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -63,6 +63,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -1574,7 +1575,7 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
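The s390 fix swaps a bit-set for a bit-clear: atomic_or() would re-assert PROG_REQUEST at the moment the request had just been handled, so the flag would never read as completed. A standalone analogue using C11 atomics (the PROG_REQUEST value here is made up):

#include <assert.h>
#include <stdatomic.h>

#define PROG_REQUEST 0x0001u

int main(void)
{
	_Atomic unsigned prog20 = 0;

	atomic_fetch_or(&prog20, PROG_REQUEST);	/* request raised: set the bit */
	atomic_fetch_and(&prog20, ~PROG_REQUEST);	/* request handled: clear it */
	assert((prog20 & PROG_REQUEST) == 0);
	return 0;
}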
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -711,6 +711,7 @@ struct kvm_vcpu_stat {
 	u32 nmi_window_exits;
 	u32 halt_exits;
 	u32 halt_successful_poll;
+	u32 halt_attempted_poll;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
 	u32 irq_exits;
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6064,6 +6064,8 @@ static __init int hardware_setup(void)
 	memcpy(vmx_msr_bitmap_longmode_x2apic,
 			vmx_msr_bitmap_longmode, PAGE_SIZE);
 
+	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
 	if (enable_apicv) {
 		for (msr = 0x800; msr <= 0x8ff; msr++)
 			vmx_disable_intercept_msr_read_x2apic(msr);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -149,6 +149,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
 	{ "halt_exits", VCPU_STAT(halt_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "hypercalls", VCPU_STAT(hypercalls) },
 	{ "request_irq", VCPU_STAT(request_irq_exits) },
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -35,11 +35,7 @@
 #define VGIC_V3_MAX_LRS		16
 #define VGIC_MAX_IRQS		1024
 #define VGIC_V2_MAX_CPUS	8
-
-/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 255)
-#error	Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
-#endif
+#define VGIC_V3_MAX_CPUS	255
 
 #if (VGIC_NR_IRQS_LEGACY & 31)
 #error "VGIC_NR_IRQS must be a multiple of 32"
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2669,13 +2669,20 @@ unsigned long nr_running(void)
 
 /*
  * Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race.  The caller is responsible to use it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
  */
 bool single_task_running(void)
 {
-	if (cpu_rq(smp_processor_id())->nr_running == 1)
-		return true;
-	else
-		return false;
+	return raw_rq()->nr_running == 1;
 }
 EXPORT_SYMBOL(single_task_running);
 
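The new comment matters because the answer can go stale the instant it is returned. The KVM halt-polling path, which motivated this change, is a legitimate caller: a stale result costs at most one extra poll iteration. Its loop is roughly the following (paraphrased from kvm_vcpu_block(), whose hunk appears further below):

do {
	/*
	 * One poll iteration; kvm_vcpu_check_block() notices a
	 * pending wakeup condition and sets KVM_REQ_UNHALT.
	 */
	if (kvm_vcpu_check_block(vcpu) < 0)
		goto out;
	cur = ktime_get();
} while (single_task_running() && ktime_before(cur, stop));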
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -199,6 +199,14 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 	 */
 	timer->irq = irq;
 
+	/*
+	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
+	 * and to 0 for ARMv7.  We provide an implementation that always
+	 * resets the timer to be disabled and unmasked and is compliant with
+	 * the ARMv7 architecture.
+	 */
+	timer->cntv_ctl = 0;
+
 	/*
 	 * Tell the VGIC that the virtual interrupt is tied to a
 	 * physical interrupt. We do that once per VCPU.
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -288,7 +288,7 @@ int vgic_v3_probe(struct device_node *vgic_node,
 
 	vgic->vctrl_base = NULL;
 	vgic->type = VGIC_V3;
-	vgic->max_gic_vcpus = KVM_MAX_VCPUS;
+	vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS;
 
 	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
 		 vcpu_res.start, vgic->maint_irq);
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1144,26 +1144,11 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 		struct irq_phys_map *map;
 		map = vgic_irq_map_search(vcpu, irq);
 
-		/*
-		 * If we have a mapping, and the virtual interrupt is
-		 * being injected, then we must set the state to
-		 * active in the physical world. Otherwise the
-		 * physical interrupt will fire and the guest will
-		 * exit before processing the virtual interrupt.
-		 */
 		if (map) {
-			int ret;
-
-			BUG_ON(!map->active);
 			vlr.hwirq = map->phys_irq;
 			vlr.state |= LR_HW;
 			vlr.state &= ~LR_EOI_INT;
 
-			ret = irq_set_irqchip_state(map->irq,
-						    IRQCHIP_STATE_ACTIVE,
-						    true);
-			WARN_ON(ret);
-
 			/*
 			 * Make sure we're not going to sample this
 			 * again, as a HW-backed interrupt cannot be
@@ -1255,7 +1240,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	unsigned long *pa_percpu, *pa_shared;
-	int i, vcpu_id;
+	int i, vcpu_id, lr, ret;
 	int overflow = 0;
 	int nr_shared = vgic_nr_shared_irqs(dist);
 
@@ -1310,6 +1295,31 @@ epilog:
 		 */
 		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
 	}
+
+	for (lr = 0; lr < vgic->nr_lr; lr++) {
+		struct vgic_lr vlr;
+
+		if (!test_bit(lr, vgic_cpu->lr_used))
+			continue;
+
+		vlr = vgic_get_lr(vcpu, lr);
+
+		/*
+		 * If we have a mapping, and the virtual interrupt is
+		 * presented to the guest (as pending or active), then we must
+		 * set the state to active in the physical world. See
+		 * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
+		 */
+		if (vlr.state & LR_HW) {
+			struct irq_phys_map *map;
+			map = vgic_irq_map_search(vcpu, vlr.irq);
+
+			ret = irq_set_irqchip_state(map->irq,
+						    IRQCHIP_STATE_ACTIVE,
+						    true);
+			WARN_ON(ret);
+		}
+	}
 }
 
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -24,9 +24,9 @@ struct kvm_coalesced_mmio_dev {
 int kvm_coalesced_mmio_init(struct kvm *kvm);
 void kvm_coalesced_mmio_free(struct kvm *kvm);
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
-					struct kvm_coalesced_mmio_zone *zone);
+					 struct kvm_coalesced_mmio_zone *zone);
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
-					struct kvm_coalesced_mmio_zone *zone);
+					   struct kvm_coalesced_mmio_zone *zone);
 
 #else
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
 	return KVM_MMIO_BUS;
 }
 
-static int
-kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
+				enum kvm_bus bus_idx,
+				struct kvm_ioeventfd *args)
 {
-	enum kvm_bus bus_idx;
-	struct _ioeventfd *p;
-	struct eventfd_ctx *eventfd;
-	int ret;
-
-	bus_idx = ioeventfd_bus_from_flags(args->flags);
-	/* must be natural-word sized, or 0 to ignore length */
-	switch (args->len) {
-	case 0:
-	case 1:
-	case 2:
-	case 4:
-	case 8:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* check for range overflow */
-	if (args->addr + args->len < args->addr)
-		return -EINVAL;
-
-	/* check for extra flags that we don't understand */
-	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
-		return -EINVAL;
-
-	/* ioeventfd with no length can't be combined with DATAMATCH */
-	if (!args->len &&
-	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
-			   KVM_IOEVENTFD_FLAG_DATAMATCH))
-		return -EINVAL;
+	struct eventfd_ctx *eventfd;
+	struct _ioeventfd *p;
+	int ret;
 
 	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
@@ -843,16 +817,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	if (ret < 0)
 		goto unlock_fail;
 
-	/* When length is ignored, MMIO is also put on a separate bus, for
-	 * faster lookups.
-	 */
-	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
-		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
-					      p->addr, 0, &p->dev);
-		if (ret < 0)
-			goto register_fail;
-	}
-
 	kvm->buses[bus_idx]->ioeventfd_count++;
 	list_add_tail(&p->list, &kvm->ioeventfds);
 
@@ -860,8 +824,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 	return 0;
 
-register_fail:
-	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
 unlock_fail:
 	mutex_unlock(&kvm->slots_lock);
 
@@ -873,14 +835,13 @@ fail:
 }
 
 static int
-kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
+			   struct kvm_ioeventfd *args)
 {
-	enum kvm_bus bus_idx;
 	struct _ioeventfd *p, *tmp;
 	struct eventfd_ctx *eventfd;
 	int ret = -ENOENT;
 
-	bus_idx = ioeventfd_bus_from_flags(args->flags);
 	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
@@ -901,10 +862,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 			continue;
 
 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
-		if (!p->length) {
-			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
-						  &p->dev);
-		}
 		kvm->buses[bus_idx]->ioeventfd_count--;
 		ioeventfd_release(p);
 		ret = 0;
@@ -918,6 +875,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	return ret;
 }
 
+static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
+	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+
+	if (!args->len && bus_idx == KVM_MMIO_BUS)
+		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+
+	return ret;
+}
+
+static int
+kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+	enum kvm_bus bus_idx;
+	int ret;
+
+	bus_idx = ioeventfd_bus_from_flags(args->flags);
+	/* must be natural-word sized, or 0 to ignore length */
+	switch (args->len) {
+	case 0:
+	case 1:
+	case 2:
+	case 4:
+	case 8:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* check for range overflow */
+	if (args->addr + args->len < args->addr)
+		return -EINVAL;
+
+	/* check for extra flags that we don't understand */
+	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
+		return -EINVAL;
+
+	/* ioeventfd with no length can't be combined with DATAMATCH */
+	if (!args->len &&
+	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
+			   KVM_IOEVENTFD_FLAG_DATAMATCH))
+		return -EINVAL;
+
+	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
+	if (ret)
+		goto fail;
+
+	/* When length is ignored, MMIO is also put on a separate bus, for
+	 * faster lookups.
+	 */
+	if (!args->len && bus_idx == KVM_MMIO_BUS) {
+		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+		if (ret < 0)
+			goto fast_fail;
+	}
+
+	return 0;
+
+fast_fail:
+	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+fail:
+	return ret;
+}
+
 int
 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
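For context, a hypothetical userspace sketch of the ioctl this refactor services: registering a zero-length ("fast MMIO") ioeventfd. With the fixes above, such a wildcard lands on both the MMIO and fast-MMIO buses exactly once, and deassign tears both registrations down exactly once, which is what eliminates the double free. Error handling and eventfd cleanup are omitted:

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* 'vm_fd' is a KVM VM file descriptor obtained via KVM_CREATE_VM. */
int register_fast_mmio(int vm_fd, __u64 gpa)
{
	struct kvm_ioeventfd io;
	int efd = eventfd(0, EFD_NONBLOCK);

	memset(&io, 0, sizeof(io));
	io.addr = gpa;
	io.len = 0;	/* zero length: match the exact address only */
	io.fd = efd;
	io.flags = 0;	/* MMIO bus; DATAMATCH is invalid when len == 0 */

	return ioctl(vm_fd, KVM_IOEVENTFD, &io);
}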
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2004,6 +2004,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	if (vcpu->halt_poll_ns) {
 		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
 
+		++vcpu->stat.halt_attempted_poll;
 		do {
 			/*
 			 * This sets KVM_REQ_UNHALT if an interrupt
@@ -2043,7 +2044,8 @@ out:
 		else if (vcpu->halt_poll_ns < halt_poll_ns &&
 			 block_ns < halt_poll_ns)
 			grow_halt_poll_ns(vcpu);
-	}
+	} else
+		vcpu->halt_poll_ns = 0;
 
 	trace_kvm_vcpu_wakeup(block_ns, waited);
 }
@@ -3156,10 +3158,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
 				 const struct kvm_io_range *r2)
 {
-	if (r1->addr < r2->addr)
+	gpa_t addr1 = r1->addr;
+	gpa_t addr2 = r2->addr;
+
+	if (addr1 < addr2)
 		return -1;
-	if (r1->addr + r1->len > r2->addr + r2->len)
+
+	/* If r2->len == 0, match the exact address.  If r2->len != 0,
+	 * accept any overlapping write.  Any order is acceptable for
+	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
+	 * we process all of them.
+	 */
+	if (r2->len) {
+		addr1 += r1->len;
+		addr2 += r2->len;
+	}
+
+	if (addr1 > addr2)
 		return 1;
+
 	return 0;
 }
 
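A standalone restatement of the new comparator semantics, with stand-in types rather than the kernel's: a zero-length registration matches only its exact address, while a registration with a length accepts any overlapping access:

#include <assert.h>
#include <stdint.h>

typedef uint64_t gpa_t;
struct range { gpa_t addr; int len; };

static int cmp(const struct range *r1, const struct range *r2)
{
	gpa_t addr1 = r1->addr, addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;
	if (r2->len) {		/* non-zero length: compare range ends */
		addr1 += r1->len;
		addr2 += r2->len;
	}
	if (addr1 > addr2)
		return 1;
	return 0;		/* match */
}

int main(void)
{
	struct range write4 = { 0x1000, 4 };	/* a 4-byte guest write */
	struct range wildcard = { 0x1000, 0 };	/* zero-length ioeventfd */
	struct range region = { 0x0ff0, 0x20 };	/* covers 0xff0..0x100f */

	assert(cmp(&write4, &wildcard) == 0);	/* exact address matches */
	assert(cmp(&write4, &region) == 0);	/* overlapping range matches */
	return 0;
}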