ARM:
 - Move the arch-specific code into arch/arm64/kvm
 - Start the post-32bit cleanup
 - Cherry-pick a few non-invasive pre-NV patches

x86:
 - Rework of TLB flushing
 - Rework of event injection, especially with respect to nested
   virtualization
 - Nested AMD event injection facelift, building on the rework of
   generic code and fixing a lot of corner cases
 - Nested AMD live migration support
 - Optimization for TSC deadline MSR writes and IPIs
 - Various cleanups
 - Asynchronous page fault cleanups (from tglx, common topic branch
   with tip tree)
 - Interrupt-based delivery of asynchronous "page ready" events (host
   side)
 - Hyper-V MSRs and hypercalls for guest debugging
 - VMX preemption timer fixes

s390:
 - Cleanups

Generic:
 - switch vCPU thread wakeup from swait to rcuwait

The other architectures, and the guest side of the asynchronous page
fault work, will come next week.
-----BEGIN PGP SIGNATURE-----

iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAl7VJcYUHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroPf6QgAq4wU5wdd1lTGz/i3DIhNVJNJgJlp
ozLzRdMaJbdbn5RpAK6PEBd9+pt3+UlojpFB3gpJh2Nazv2OzV4yLQgXXXyyMEx1
5Hg7b4UCJYDrbkCiegNRv7f/4FWDkQ9dx++RZITIbxeskBBCEI+I7GnmZhGWzuC4
7kj4ytuKAySF2OEJu0VQF6u0CvrNYfYbQIRKBXjtOwuRK4Q6L63FGMJpYo159MBQ
asg3B1jB5TcuGZ9zrjL5LkuzaP4qZZHIRs+4kZsH9I6MODHGUxKonrkablfKxyKy
CFK+iaHCuEXXty5K0VmWM3nrTfvpEjVjbMc7e1QGBQ5oXsDM0pqn84syRg==
=v7Wn
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:
 "ARM:
   - Move the arch-specific code into arch/arm64/kvm
   - Start the post-32bit cleanup
   - Cherry-pick a few non-invasive pre-NV patches

  x86:
   - Rework of TLB flushing
   - Rework of event injection, especially with respect to nested
     virtualization
   - Nested AMD event injection facelift, building on the rework of
     generic code and fixing a lot of corner cases
   - Nested AMD live migration support
   - Optimization for TSC deadline MSR writes and IPIs
   - Various cleanups
   - Asynchronous page fault cleanups (from tglx, common topic branch
     with tip tree)
   - Interrupt-based delivery of asynchronous "page ready" events (host
     side)
   - Hyper-V MSRs and hypercalls for guest debugging
   - VMX preemption timer fixes

  s390:
   - Cleanups

  Generic:
   - switch vCPU thread wakeup from swait to rcuwait

  The other architectures, and the guest side of the asynchronous page
  fault work, will come next week"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (256 commits)
  KVM: selftests: fix rdtsc() for vmx_tsc_adjust_test
  KVM: check userspace_addr for all memslots
  KVM: selftests: update hyperv_cpuid with SynDBG tests
  x86/kvm/hyper-v: Add support for synthetic debugger via hypercalls
  x86/kvm/hyper-v: enable hypercalls regardless of hypercall page
  x86/kvm/hyper-v: Add support for synthetic debugger interface
  x86/hyper-v: Add synthetic debugger definitions
  KVM: selftests: VMX preemption timer migration test
  KVM: nVMX: Fix VMX preemption timer migration
  x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
  KVM: x86/pmu: Support full width counting
  KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in
  KVM: x86: announce KVM_FEATURE_ASYNC_PF_INT
  KVM: x86: acknowledgment mechanism for async pf page ready notifications
  KVM: x86: interrupt based APF 'page ready' event delivery
  KVM: introduce kvm_read_guest_offset_cached()
  KVM: rename kvm_arch_can_inject_async_page_present() to kvm_arch_can_dequeue_async_page_present()
  KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info
  Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously"
  KVM: VMX: Replace zero-length array with flexible-array
  ...
commit 039aeb9deb
@@ -4336,9 +4336,13 @@ Errors:
#define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_VMX_SMM_VMXON	0x00000002

#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE	0x00000001

struct kvm_vmx_nested_state_hdr {
    __u32 flags;
    __u64 vmxon_pa;
    __u64 vmcs12_pa;
    __u64 preemption_timer_deadline;

    struct {
        __u16 flags;
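As an illustration of the new header fields, a minimal userspace sketch;
the 8 KiB data buffer, helper name and error handling are assumptions,
not part of this change::

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Hypothetical helper: fetch the saved preemption timer deadline. */
    static int get_preemption_deadline(int vcpu_fd, __u64 *deadline)
    {
        struct {
            struct kvm_nested_state state;
            char data[8192];        /* room for vmcs12 etc. */
        } buf;

        memset(&buf, 0, sizeof(buf));
        buf.state.size = sizeof(buf);
        if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &buf) < 0)
            return -1;
        if (!(buf.state.hdr.vmx.flags &
              KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE))
            return -1;      /* no deadline saved */
        *deadline = buf.state.hdr.vmx.preemption_timer_deadline;
        return 0;
    }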
@@ -5068,10 +5072,13 @@ EOI was received.
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC          1
#define KVM_EXIT_HYPERV_HCALL          2
#define KVM_EXIT_HYPERV_SYNDBG         3
    __u32 type;
    __u32 pad1;
    union {
        struct {
            __u32 msr;
            __u32 pad2;
            __u64 control;
            __u64 evt_page;
            __u64 msg_page;
@@ -5081,6 +5088,15 @@ EOI was received.
            __u64 result;
            __u64 params[2];
        } hcall;
        struct {
            __u32 msr;
            __u32 pad2;
            __u64 control;
            __u64 status;
            __u64 send_page;
            __u64 recv_page;
            __u64 pending_page;
        } syndbg;
    } u;
};
/* KVM_EXIT_HYPERV */
@@ -5097,6 +5113,12 @@ Hyper-V SynIC state change. Notification is used to remap SynIC
event/message pages and to enable/disable SynIC messages/events processing
in userspace.

 - KVM_EXIT_HYPERV_SYNDBG -- synchronously notify user-space about
   Hyper-V Synthetic debugger state change. Notification is used to either update
   the pending_page location or to send a control command (send the buffer located
   in send_page or recv a buffer to recv_page).

::

/* KVM_EXIT_ARM_NISV */
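A sketch of how a VMM run loop might dispatch this exit; the handle_*()
helpers stand in for VMM-specific logic and are assumptions, not KVM
interfaces::

    #include <linux/kvm.h>

    extern void handle_synic(__u32 msr, __u64 control,
                             __u64 evt_page, __u64 msg_page);       /* assumed */
    extern __u64 handle_hcall(__u64 param0, __u64 param1);          /* assumed */
    extern void handle_syndbg(__u64 control, __u64 send_page,
                              __u64 recv_page, __u64 pending_page); /* assumed */

    static void handle_hyperv_exit(struct kvm_run *run)
    {
        struct kvm_hyperv_exit *hv = &run->hyperv;

        switch (hv->type) {
        case KVM_EXIT_HYPERV_SYNIC:
            handle_synic(hv->u.synic.msr, hv->u.synic.control,
                         hv->u.synic.evt_page, hv->u.synic.msg_page);
            break;
        case KVM_EXIT_HYPERV_HCALL:
            hv->u.hcall.result = handle_hcall(hv->u.hcall.params[0],
                                              hv->u.hcall.params[1]);
            break;
        case KVM_EXIT_HYPERV_SYNDBG:
            /* Re-map pending_page and/or act on a control command. */
            handle_syndbg(hv->u.syndbg.control, hv->u.syndbg.send_page,
                          hv->u.syndbg.recv_page, hv->u.syndbg.pending_page);
            break;
        }
    }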
@@ -5779,7 +5801,7 @@ will be initialized to 1 when created. This also improves performance because
dirty logging can be enabled gradually in small chunks on the first call
to KVM_CLEAR_DIRTY_LOG. KVM_DIRTY_LOG_INITIALLY_SET depends on
KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (it is also only available on
x86 for now).
x86 and arm64 for now).

KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make
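A minimal sketch of how userspace might opt in to the initially-set
behaviour described above; the VM fd handling is an assumption::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Enable manual dirty-log protection with all pages initially dirty. */
    static int enable_initially_set(int vm_fd)
    {
        struct kvm_enable_cap cap = {
            .cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
            .args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                       KVM_DIRTY_LOG_INITIALLY_SET,
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }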
@@ -5804,6 +5826,23 @@ If present, this capability can be enabled for a VM, meaning that KVM
will allow the transition to secure guest mode. Otherwise KVM will
veto the transition.

7.20 KVM_CAP_HALT_POLL
----------------------

:Architectures: all
:Target: VM
:Parameters: args[0] is the maximum poll time in nanoseconds
:Returns: 0 on success; -1 on error

This capability overrides the kvm module parameter halt_poll_ns for the
target VM.

VCPU polling allows a VCPU to poll for wakeup events instead of immediately
scheduling during guest halts. The maximum time a VCPU can spend polling is
controlled by the kvm module parameter halt_poll_ns. This capability allows
the maximum halt time to be specified on a per-VM basis, effectively overriding
the module parameter for the target VM.

8. Other capabilities.
======================
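A sketch of setting a per-VM polling budget with the new capability; the
500 µs value is purely illustrative::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Cap halt polling for this VM at the given number of nanoseconds. */
    static int set_halt_poll_ns(int vm_fd, unsigned long long ns)
    {
        struct kvm_enable_cap cap = {
            .cap = KVM_CAP_HALT_POLL,
            .args[0] = ns,
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

    /* e.g. set_halt_poll_ns(vm_fd, 500000); */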
@@ -50,8 +50,8 @@ KVM_FEATURE_NOP_IO_DELAY           1          not necessary to perform delays
KVM_FEATURE_MMU_OP                 2          deprecated

KVM_FEATURE_CLOCKSOURCE2           3          kvmclock available at msrs
                                              0x4b564d00 and 0x4b564d01

KVM_FEATURE_ASYNC_PF               4          async pf can be enabled by
                                              writing to msr 0x4b564d02

@@ -86,6 +86,12 @@ KVM_FEATURE_PV_SCHED_YIELD         13         guest checks this feature bit
                                              before using paravirtualized
                                              sched yield.

KVM_FEATURE_ASYNC_PF_INT           14         guest checks this feature bit
                                              before using the second async
                                              pf control msr 0x4b564d06 and
                                              async pf acknowledgment msr
                                              0x4b564d07.

KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24         host will warn if no guest-side
                                              per-cpu warps are expected in
                                              kvmclock
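A sketch of how a guest might test these bits before touching the async PF
MSRs; the leaf number follows the KVM paravirt convention (base 0x40000000),
and verifying the KVM signature at leaf 0x40000000 first is elided here::

    #include <cpuid.h>      /* GCC/clang __cpuid macro */

    #define KVM_CPUID_FEATURES          0x40000001
    #define KVM_FEATURE_ASYNC_PF        (1 << 4)
    #define KVM_FEATURE_ASYNC_PF_INT    (1 << 14)

    /* Returns nonzero if interrupt-based 'page ready' delivery is usable. */
    static int kvm_has_async_pf_int(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Hypervisor CPUID leaves are outside __get_cpuid()'s range
         * check, so issue the query directly. */
        __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
        return (eax & KVM_FEATURE_ASYNC_PF) &&
               (eax & KVM_FEATURE_ASYNC_PF_INT);
    }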
@@ -190,41 +190,72 @@ MSR_KVM_ASYNC_PF_EN:
    0x4b564d02

data:
    Bits 63-6 hold 64-byte aligned physical address of a
    64 byte memory area which must be in guest RAM and must be
    zeroed. Bits 5-3 are reserved and should be zero. Bit 0 is 1
    when asynchronous page faults are enabled on the vcpu 0 when
    disabled. Bit 1 is 1 if asynchronous page faults can be injected
    when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
    are delivered to L1 as #PF vmexits. Bit 2 can be set only if
    KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
    Asynchronous page fault (APF) control MSR.

    First 4 byte of 64 byte memory location will be written to by
    the hypervisor at the time of asynchronous page fault (APF)
    injection to indicate type of asynchronous page fault. Value
    of 1 means that the page referred to by the page fault is not
    present. Value 2 means that the page is now available. Disabling
    interrupt inhibits APFs. Guest must not enable interrupt
    before the reason is read, or it may be overwritten by another
    APF. Since APF uses the same exception vector as regular page
    fault guest must reset the reason to 0 before it does
    something that can generate normal page fault. If during page
    fault APF reason is 0 it means that this is regular page
    fault.
    Bits 63-6 hold 64-byte aligned physical address of a 64 byte memory area
    which must be in guest RAM and must be zeroed. This memory is expected
    to hold a copy of the following structure::

    During delivery of type 1 APF cr2 contains a token that will
    be used to notify a guest when missing page becomes
    available. When page becomes available type 2 APF is sent with
    cr2 set to the token associated with the page. There is special
    kind of token 0xffffffff which tells vcpu that it should wake
    up all processes waiting for APFs and no individual type 2 APFs
    will be sent.
    struct kvm_vcpu_pv_apf_data {
        /* Used for 'page not present' events delivered via #PF */
        __u32 flags;

        /* Used for 'page ready' events delivered via interrupt notification */
        __u32 token;

        __u8 pad[56];
        __u32 enabled;
    };

    Bits 5-4 of the MSR are reserved and should be zero. Bit 0 is set to 1
    when asynchronous page faults are enabled on the vcpu, 0 when disabled.
    Bit 1 is 1 if asynchronous page faults can be injected when vcpu is in
    cpl == 0. Bit 2 is 1 if asynchronous page faults are delivered to L1 as
    #PF vmexits. Bit 2 can be set only if KVM_FEATURE_ASYNC_PF_VMEXIT is
    present in CPUID. Bit 3 enables interrupt based delivery of 'page ready'
    events. Bit 3 can only be set if KVM_FEATURE_ASYNC_PF_INT is present in
    CPUID.

    'Page not present' events are currently always delivered as synthetic
    #PF exception. During delivery of these events APF CR2 register contains
    a token that will be used to notify the guest when missing page becomes
    available. Also, to make it possible to distinguish between real #PF and
    APF, first 4 bytes of 64 byte memory location ('flags') will be written
    to by the hypervisor at the time of injection. Only first bit of 'flags'
    is currently supported, when set, it indicates that the guest is dealing
    with asynchronous 'page not present' event. If during a page fault APF
    'flags' is '0' it means that this is regular page fault. Guest is
    supposed to clear 'flags' when it is done handling #PF exception so the
    next event can be delivered.

    Note, since APF 'page not present' events use the same exception vector
    as regular page fault, guest must reset 'flags' to '0' before it does
    something that can generate normal page fault.

    Bytes 4-7 of 64 byte memory location ('token') will be written to by the
    hypervisor at the time of APF 'page ready' event injection. The content
    of these bytes is a token which was previously delivered as 'page not
    present' event. The event indicates the page is now available. Guest is
    supposed to write '0' to 'token' when it is done handling 'page ready'
    event and to write '1' to MSR_KVM_ASYNC_PF_ACK after clearing the location;
    writing to the MSR forces KVM to re-scan its queue and deliver the next
    pending notification.

    Note, MSR_KVM_ASYNC_PF_INT MSR specifying the interrupt vector for 'page
    ready' APF delivery needs to be written to before enabling APF mechanism
    in MSR_KVM_ASYNC_PF_EN or interrupt #0 can get injected. The MSR is
    available if KVM_FEATURE_ASYNC_PF_INT is present in CPUID.

    Note, previously, 'page ready' events were delivered via the same #PF
    exception as 'page not present' events but this is now deprecated. If
    bit 3 (interrupt based delivery) is not set APF events are not delivered.

    If APF is disabled while there are outstanding APFs, they will
    not be delivered.

    Currently type 2 APF will be always delivered on the same vcpu as
    type 1 was, but guest should not rely on that.
    Currently 'page ready' APF events will be always delivered on the
    same vcpu as 'page not present' event was, but guest should not rely on
    that.
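A sketch of the guest-side enable sequence the text above implies (vector
first, then the control MSR); the wrmsr wrapper and vector choice are
assumptions::

    #include <stdint.h>

    #define MSR_KVM_ASYNC_PF_EN   0x4b564d02
    #define MSR_KVM_ASYNC_PF_INT  0x4b564d06

    /* Hypothetical wrmsr wrapper. */
    static inline void wrmsr(uint32_t msr, uint64_t val)
    {
        asm volatile("wrmsr" :: "c"(msr), "a"((uint32_t)val),
                     "d"((uint32_t)(val >> 32)));
    }

    /* 64-byte aligned, zeroed shared area (struct kvm_vcpu_pv_apf_data). */
    static uint8_t apf_data[64] __attribute__((aligned(64)));

    static void kvm_async_pf_enable(uint8_t vector)
    {
        /* Program the 'page ready' vector before enabling, as required. */
        wrmsr(MSR_KVM_ASYNC_PF_INT, vector);
        /* Bit 0: enable; bit 3: interrupt-based 'page ready' delivery. */
        wrmsr(MSR_KVM_ASYNC_PF_EN, (uint64_t)(uintptr_t)apf_data |
                                   (1 << 0) | (1 << 3));
    }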
MSR_KVM_STEAL_TIME:
    0x4b564d03
@@ -319,3 +350,29 @@ data:

    KVM guests can request the host not to poll on HLT, for example if
    they are performing polling themselves.

MSR_KVM_ASYNC_PF_INT:
    0x4b564d06

data:
    Second asynchronous page fault (APF) control MSR.

    Bits 0-7: APIC vector for delivery of 'page ready' APF events.
    Bits 8-63: Reserved

    Interrupt vector for asynchronous 'page ready' notifications delivery.
    The vector has to be set up before asynchronous page fault mechanism
    is enabled in MSR_KVM_ASYNC_PF_EN. The MSR is only available if
    KVM_FEATURE_ASYNC_PF_INT is present in CPUID.

MSR_KVM_ASYNC_PF_ACK:
    0x4b564d07

data:
    Asynchronous page fault (APF) acknowledgment.

    When the guest is done processing 'page ready' APF event and 'token'
    field in 'struct kvm_vcpu_pv_apf_data' is cleared it is supposed to
    write '1' to bit 0 of the MSR, this causes the host to re-scan its queue
    and check if there are more notifications pending. The MSR is available
    if KVM_FEATURE_ASYNC_PF_INT is present in CPUID.
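A sketch of the 'page ready' interrupt handler this protocol describes:
read the token, clear it, then acknowledge so the host can deliver the next
queued event. Names other than the MSR constant are assumptions::

    #include <stdint.h>

    #define MSR_KVM_ASYNC_PF_ACK  0x4b564d07

    struct kvm_vcpu_pv_apf_data {
        uint32_t flags;
        uint32_t token;
        uint8_t  pad[56];
        uint32_t enabled;
    };

    extern struct kvm_vcpu_pv_apf_data apf_data;   /* per-vCPU shared area */
    extern void wrmsr32(uint32_t msr, uint64_t v); /* assumed helper */
    extern void wake_waiters(uint32_t token);      /* guest-specific */

    /* Called from the vector programmed into MSR_KVM_ASYNC_PF_INT. */
    void kvm_async_pf_ready_irq(void)
    {
        uint32_t token = apf_data.token;

        wake_waiters(token);              /* unblock whoever faulted */
        apf_data.token = 0;               /* clear before acking */
        wrmsr32(MSR_KVM_ASYNC_PF_ACK, 1); /* let KVM re-scan its queue */
    }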
@@ -116,10 +116,7 @@ struct shadow_vmcs is ever changed.
    natural_width cr4_guest_host_mask;
    natural_width cr0_read_shadow;
    natural_width cr4_read_shadow;
    natural_width cr3_target_value0;
    natural_width cr3_target_value1;
    natural_width cr3_target_value2;
    natural_width cr3_target_value3;
    natural_width dead_space[4]; /* Last remnants of cr3_target_value[0-3]. */
    natural_width exit_qualification;
    natural_width guest_linear_address;
    natural_width guest_cr0;
@@ -9368,7 +9368,6 @@ F: arch/arm64/include/asm/kvm*
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/kvm/
F: include/kvm/arm_*
F: virt/kvm/arm/

KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
L: linux-mips@vger.kernel.org
@@ -64,12 +64,14 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);

extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
@@ -46,6 +46,9 @@
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
                                   KVM_DIRTY_LOG_INITIALLY_SET)

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
@@ -112,12 +115,8 @@ struct kvm_vcpu_fault_info {
    u64 disr_el1; /* Deferred [SError] Status Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
    __INVALID_SYSREG__,
    __INVALID_SYSREG__, /* 0 is reserved as an invalid value */
    MPIDR_EL1, /* MultiProcessor Affinity Register */
    CSSELR_EL1, /* Cache Size Selection Register */
    SCTLR_EL1, /* System Control Register */
@@ -415,6 +414,8 @@ struct kvm_vm_stat {
struct kvm_vcpu_stat {
    u64 halt_successful_poll;
    u64 halt_attempted_poll;
    u64 halt_poll_success_ns;
    u64 halt_poll_fail_ns;
    u64 halt_poll_invalid;
    u64 halt_wakeup;
    u64 hvc_exit_stat;
@@ -530,39 +531,6 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
    cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
}

void __kvm_enable_ssbs(void);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                                       unsigned long hyp_stack_ptr,
                                       unsigned long vector_ptr)
{
    /*
     * Calculate the raw per-cpu offset without a translation from the
     * kernel's mapping to the linear mapping, and store it in tpidr_el2
     * so that we can use adr_l to access per-cpu variables in EL2.
     */
    u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
                     (u64)kvm_ksym_ref(kvm_host_data));

    /*
     * Call initialization code, and switch to the full blown HYP code.
     * If the cpucaps haven't been finalized yet, something has gone very
     * wrong, and hyp will crash and burn when it uses any
     * cpus_have_const_cap() wrapper.
     */
    BUG_ON(!system_capabilities_finalized());
    __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

    /*
     * Disabling SSBD on a non-VHE system requires us to enable SSBS
     * at EL2.
     */
    if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
        arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
        kvm_call_hyp(__kvm_enable_ssbs);
    }
}

static inline bool kvm_arch_requires_vhe(void)
{
    /*
@@ -594,8 +562,6 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void) {}

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
@@ -55,12 +55,12 @@

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __timer_enable_traps(struct kvm_vcpu *vcpu);
@@ -363,8 +363,6 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
    }
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

@@ -473,7 +471,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

/* This is only called on a VHE system */
/* This is called on both VHE and !VHE systems */
static inline void *kvm_get_hyp_vector(void)
{
    struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@ -35,7 +35,7 @@
#define GIC_PRIO_PSR_I_SET		(1 << 4)

/* Additional SPSR bits not exposed in the UABI */

#define PSR_MODE_THREAD_BIT		(1 << 0)
#define PSR_IL_BIT			(1 << 20)

/* AArch32-specific ptrace requests */
@@ -85,7 +85,7 @@ static inline bool is_kernel_in_hyp_mode(void)

static __always_inline bool has_vhe(void)
{
    if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
    if (cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN))
        return true;

    return false;
@@ -97,7 +97,7 @@ int main(void)
    DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
    DEFINE(CPU_BOOT_TASK,	offsetof(struct secondary_data, task));
    BLANK();
#ifdef CONFIG_KVM_ARM_HOST
#ifdef CONFIG_KVM
    DEFINE(VCPU_CONTEXT,	offsetof(struct kvm_vcpu, arch.ctxt));
    DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
    DEFINE(VCPU_WORKAROUND_FLAGS,	offsetof(struct kvm_vcpu, arch.workaround_flags));
@@ -234,7 +234,7 @@ static int detect_harden_bp_fw(void)
        smccc_end = NULL;
        break;

#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
#if IS_ENABLED(CONFIG_KVM)
    case SMCCC_CONDUIT_SMC:
        cb = call_smc_arch_workaround_1;
        smccc_start = __smccc_workaround_1_smc;
@@ -422,7 +422,7 @@ static void __init hyp_mode_check(void)
            "CPU: CPUs started in inconsistent modes");
    else
        pr_info("CPU: All CPU(s) started at EL1\n");
    if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
    if (IS_ENABLED(CONFIG_KVM))
        kvm_compute_layout();
}

@@ -3,7 +3,6 @@
# KVM configuration
#

source "virt/kvm/Kconfig"
source "virt/lib/Kconfig"

menuconfig VIRTUALIZATION
@@ -18,7 +17,7 @@ menuconfig VIRTUALIZATION

if VIRTUALIZATION

config KVM
menuconfig KVM
	bool "Kernel-based Virtual Machine (KVM) support"
	depends on OF
	# for TASKSTATS/TASK_DELAY_ACCT:
@@ -28,13 +27,11 @@ config KVM
	select HAVE_KVM_CPU_RELAX_INTERCEPT
	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
	select KVM_MMIO
	select KVM_ARM_HOST
	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
	select SRCU
	select KVM_VFIO
	select HAVE_KVM_EVENTFD
	select HAVE_KVM_IRQFD
	select KVM_ARM_PMU if HW_PERF_EVENTS
	select HAVE_KVM_MSI
	select HAVE_KVM_IRQCHIP
	select HAVE_KVM_IRQ_ROUTING
@@ -45,23 +42,24 @@ config KVM
	select TASK_DELAY_ACCT
	---help---
	  Support hosting virtualized guest machines.
	  We don't support KVM with 16K page tables yet, due to the multiple
	  levels of fake page tables.

	  If unsure, say N.

config KVM_ARM_HOST
	bool
	---help---
	  Provides host support for ARM processors.
if KVM

source "virt/kvm/Kconfig"

config KVM_ARM_PMU
	bool
	bool "Virtual Performance Monitoring Unit (PMU) support"
	depends on HW_PERF_EVENTS
	default y
	---help---
	  Adds support for a virtual Performance Monitoring Unit (PMU) in
	  virtual machines.

config KVM_INDIRECT_VECTORS
	def_bool KVM && (HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS)
	def_bool HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS

endif # KVM

endif # VIRTUALIZATION
@@ -3,37 +3,25 @@
# Makefile for Kernel-based Virtual Machine module
#

ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
ccflags-y += -I $(srctree)/$(src)

KVM=../../../virt/kvm

obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM) += hyp/

kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvtime.o
kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
	 $(KVM)/vfio.o $(KVM)/irqchip.o \
	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
	 inject_fault.o regmap.o va_layout.o hyp.o hyp-init.o handle_exit.o \
	 guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o \
	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
	 aarch32.o arch_timer.o \
	 vgic/vgic.o vgic/vgic-init.o \
	 vgic/vgic-irqfd.o vgic/vgic-v2.o \
	 vgic/vgic-v3.o vgic/vgic-v4.o \
	 vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \
	 vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
	 vgic/vgic-its.o vgic/vgic-debug.o

kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o

kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v4.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-its.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-debug.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/irqchip.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
kvm-$(CONFIG_KVM_ARM_PMU)  += $(KVM)/arm/pmu.o
kvm-$(CONFIG_KVM_ARM_PMU)  += pmu-emul.o
@@ -451,17 +451,7 @@ out:

static void set_cntvoff(u64 cntvoff)
{
    u32 low = lower_32_bits(cntvoff);
    u32 high = upper_32_bits(cntvoff);

    /*
     * Since kvm_call_hyp doesn't fully support the ARM PCS especially on
     * 32-bit systems, but rather passes register by register shifted one
     * place (we put the function address in r0/x0), we cannot simply pass
     * a 64-bit value as an argument, but have to split the value in two
     * 32-bit halves.
     */
    kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
    kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
@@ -571,6 +561,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);
    struct timer_map map;
    struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

    if (unlikely(!timer->enabled))
        return;
@@ -593,7 +584,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
    if (map.emul_ptimer)
        soft_timer_cancel(&map.emul_ptimer->hrtimer);

    if (swait_active(kvm_arch_vcpu_wq(vcpu)))
    if (rcuwait_active(wait))
        kvm_timer_blocking(vcpu);

    /*
@@ -22,7 +22,7 @@
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
@@ -95,6 +95,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
    return r;
}

static int kvm_arm_default_max_vcpus(void)
{
    return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm: pointer to the KVM struct
@@ -128,8 +133,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
    kvm->arch.vmid.vmid_gen = 0;

    /* The maximum number of VCPUs is limited by the host's GIC model */
    kvm->arch.max_vcpus = vgic_present ?
            kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
    kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

    return ret;
out_free_stage2_pgd:
@@ -204,10 +208,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        r = num_online_cpus();
        break;
    case KVM_CAP_MAX_VCPUS:
        r = KVM_MAX_VCPUS;
        break;
    case KVM_CAP_MAX_VCPU_ID:
        r = KVM_MAX_VCPU_ID;
        if (kvm)
            r = kvm->arch.max_vcpus;
        else
            r = kvm_arm_default_max_vcpus();
        break;
    case KVM_CAP_MSI_DEVID:
        if (!kvm)
@@ -455,9 +460,9 @@ void force_vm_exit(const cpumask_t *mask)
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which which requires us to assign a new value. If we're the
 * first to use a VMID for the new generation, we must flush necessary caches
 * and TLBs on all CPUs.
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
@@ -579,16 +584,17 @@ void kvm_arm_resume_guest(struct kvm *kvm)

    kvm_for_each_vcpu(i, vcpu, kvm) {
        vcpu->arch.pause = false;
        swake_up_one(kvm_arch_vcpu_wq(vcpu));
        rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
    }
}

static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
    struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
    struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

    swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
                                              (!vcpu->arch.pause)));
    rcuwait_wait_event(wait,
                       (!vcpu->arch.power_off) && (!vcpu->arch.pause),
                       TASK_INTERRUPTIBLE);

    if (vcpu->arch.power_off || vcpu->arch.pause) {
        /* Awaken to handle a signal, request we sleep again later. */
@@ -639,7 +645,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer
 * @run: The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
@@ -647,8 +652,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    int ret;

    if (unlikely(!kvm_vcpu_initialized(vcpu)))
@@ -659,7 +665,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return ret;

    if (run->exit_reason == KVM_EXIT_MMIO) {
        ret = kvm_handle_mmio_return(vcpu, vcpu->run);
        ret = kvm_handle_mmio_return(vcpu, run);
        if (ret)
            return ret;
    }
@@ -983,8 +989,11 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
    /*
     * Ensure a rebooted VM will fault in RAM pages and detect if the
     * guest MMU is turned off and flush the caches as needed.
     *
     * S2FWB enforces all memory accesses to RAM being cacheable, we
     * ensure that the cache is always coherent.
     */
    if (vcpu->arch.has_run_once)
    if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
        stage2_unmap_vm(vcpu->kvm);

    vcpu_reset_hcr(vcpu);
@@ -1265,19 +1274,41 @@ static void cpu_init_hyp_mode(void)
{
    phys_addr_t pgd_ptr;
    unsigned long hyp_stack_ptr;
    unsigned long stack_page;
    unsigned long vector_ptr;
    unsigned long tpidr_el2;

    /* Switch from the HYP stub to our own HYP init vector */
    __hyp_set_vectors(kvm_get_idmap_vector());

    /*
     * Calculate the raw per-cpu offset without a translation from the
     * kernel's mapping to the linear mapping, and store it in tpidr_el2
     * so that we can use adr_l to access per-cpu variables in EL2.
     */
    tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
                 (unsigned long)kvm_ksym_ref(kvm_host_data));

    pgd_ptr = kvm_mmu_get_httbr();
    stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
    hyp_stack_ptr = stack_page + PAGE_SIZE;
    hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
    vector_ptr = (unsigned long)kvm_get_hyp_vector();

    __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
    __cpu_init_stage2();
    /*
     * Call initialization code, and switch to the full blown HYP code.
     * If the cpucaps haven't been finalized yet, something has gone very
     * wrong, and hyp will crash and burn when it uses any
     * cpus_have_const_cap() wrapper.
     */
    BUG_ON(!system_capabilities_finalized());
    __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

    /*
     * Disabling SSBD on a non-VHE system requires us to enable SSBS
     * at EL2.
     */
    if (this_cpu_has_cap(ARM64_SSBS) &&
        arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
        kvm_call_hyp(__kvm_enable_ssbs);
    }
}

static void cpu_hyp_reset(void)
@@ -29,20 +29,19 @@

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
    VCPU_STAT(halt_successful_poll),
    VCPU_STAT(halt_attempted_poll),
    VCPU_STAT(halt_poll_invalid),
    VCPU_STAT(halt_wakeup),
    VCPU_STAT(hvc_exit_stat),
    VCPU_STAT(wfe_exit_stat),
    VCPU_STAT(wfi_exit_stat),
    VCPU_STAT(mmio_exit_user),
    VCPU_STAT(mmio_exit_kernel),
    VCPU_STAT(exits),
    VCPU_STAT("halt_successful_poll", halt_successful_poll),
    VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
    VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
    VCPU_STAT("halt_wakeup", halt_wakeup),
    VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
    VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
    VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
    VCPU_STAT("mmio_exit_user", mmio_exit_user),
    VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
    VCPU_STAT("exits", exits),
    VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
    VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
    { NULL }
};

@@ -267,7 +266,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
    /*
     * Vector lengths supported by the host can't currently be
     * hidden from the guest individually: instead we can only set a
     * maxmium via ZCR_EL2.LEN. So, make sure the available vector
     * maximum via ZCR_EL2.LEN. So, make sure the available vector
     * lengths match the set requested exactly up to the requested
     * maximum:
     */
@@ -337,7 +336,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
    unsigned int reg_num;

    unsigned int reqoffset, reqlen; /* User-requested offset and length */
    unsigned int maxlen; /* Maxmimum permitted length */
    unsigned int maxlen; /* Maximum permitted length */

    size_t sve_state_size;

@@ -23,7 +23,7 @@
#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

@@ -6,20 +6,10 @@
ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
	     $(DISABLE_STACKLEAK_PLUGIN)

KVM=../../../../virt/kvm
obj-$(CONFIG_KVM) += hyp.o

obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o

obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
hyp-y := vgic-v3-sr.o timer-sr.o aarch32.o vgic-v2-cpuif-proxy.o sysreg-sr.o \
	 debug-sr.o entry.o switch.o fpsimd.o tlb.o hyp-entry.o

# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
@@ -270,8 +270,8 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
    if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
        __vgic_v3_save_state(vcpu);
        __vgic_v3_deactivate_traps(vcpu);
        __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
        __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
    }
}

@@ -279,8 +279,8 @@ static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
    if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
        __vgic_v3_activate_traps(vcpu);
        __vgic_v3_restore_state(vcpu);
        __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
        __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
    }
}

@@ -10,9 +10,8 @@

#include <asm/kvm_hyp.h>

void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
void __hyp_text __kvm_timer_set_cntvoff(u64 cntvoff)
{
    u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low;
    write_sysreg(cntvoff, cntvoff_el2);
}

@@ -194,10 +194,9 @@ static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
    return val;
}

void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
void __hyp_text __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
    struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
    u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
    u64 used_lrs = cpu_if->used_lrs;

    /*
     * Make sure stores to the GIC via the memory mapped interface
@@ -230,10 +229,9 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
    }
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
void __hyp_text __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
    struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
    u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
    u64 used_lrs = cpu_if->used_lrs;
    int i;

    if (used_lrs || cpu_if->its_vpe.its_vm) {
@@ -257,10 +255,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
    }
}

void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
void __hyp_text __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
    struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

    /*
     * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
     * Group0 interrupt (as generated in GICv2 mode) to be
@@ -306,9 +302,8 @@ void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
    write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
void __hyp_text __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
    struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
    u64 val;

    if (!cpu_if->vgic_sre) {
@@ -333,15 +328,11 @@ void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
    write_gicreg(0, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
void __hyp_text __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
    struct vgic_v3_cpu_if *cpu_if;
    u64 val;
    u32 nr_pre_bits;

    vcpu = kern_hyp_va(vcpu);
    cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

    val = read_gicreg(ICH_VTR_EL2);
    nr_pre_bits = vtr_to_nr_pre_bits(val);

@@ -370,15 +361,11 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
    }
}

void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
void __hyp_text __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
    struct vgic_v3_cpu_if *cpu_if;
    u64 val;
    u32 nr_pre_bits;

    vcpu = kern_hyp_va(vcpu);
    cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

    val = read_gicreg(ICH_VTR_EL2);
    nr_pre_bits = vtr_to_nr_pre_bits(val);

@@ -431,8 +418,6 @@ void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
    write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
    /* See Pseudocode for VPriorityGroup */
@@ -453,7 +438,7 @@ static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
                                                    u32 vmcr,
                                                    u64 *lr_val)
{
    unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
    unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
    u8 priority = GICv3_IDLE_PRIORITY;
    int i, lr = -1;

@@ -492,7 +477,7 @@ static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
                                               int intid, u64 *lr_val)
{
    unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
    unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
    int i;

    for (i = 0; i < used_lrs; i++) {
@@ -579,7 +564,7 @@ static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)

    /*
     * The priority value is independent of any of the BPR values, so we
     * normalize it using the minumal BPR value. This guarantees that no
     * normalize it using the minimal BPR value. This guarantees that no
     * matter what the guest does with its BPR, we can always set/get the
     * same value of a priority.
     */
@@ -1126,5 +1111,3 @@ int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)

    return 1;
}

#endif
@@ -26,28 +26,12 @@ enum exception_type {
    except_type_serror	= 0x180,
};

static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
    u64 exc_offset;

    switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
    case PSR_MODE_EL1t:
        exc_offset = CURRENT_EL_SP_EL0_VECTOR;
        break;
    case PSR_MODE_EL1h:
        exc_offset = CURRENT_EL_SP_ELx_VECTOR;
        break;
    case PSR_MODE_EL0t:
        exc_offset = LOWER_EL_AArch64_VECTOR;
        break;
    default:
        exc_offset = LOWER_EL_AArch32_VECTOR;
    }

    return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and compute the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
@@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
                              enum exception_type type)
{
    unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
    unsigned long old, new;
    unsigned long sctlr, vbar, old, new, mode;
    u64 exc_offset;

    mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

    if (mode == target_mode)
        exc_offset = CURRENT_EL_SP_ELx_VECTOR;
    else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
        exc_offset = CURRENT_EL_SP_EL0_VECTOR;
    else if (!(mode & PSR_MODE32_BIT))
        exc_offset = LOWER_EL_AArch64_VECTOR;
    else
        exc_offset = LOWER_EL_AArch32_VECTOR;

    switch (target_mode) {
    case PSR_MODE_EL1h:
        vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
        sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
        vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
        break;
    default:
        /* Don't do that */
        BUG();
    }

    *vcpu_pc(vcpu) = vbar + exc_offset + type;

    old = *vcpu_cpsr(vcpu);
    new = 0;
@@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
    new |= PSR_I_BIT;
    new |= PSR_F_BIT;

    new |= PSR_MODE_EL1h;
    new |= target_mode;

    return new;
    *vcpu_cpsr(vcpu) = new;
    vcpu_write_spsr(vcpu, old);
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -116,11 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
    bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
    u32 esr = 0;

    vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
    *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

    *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
    vcpu_write_spsr(vcpu, cpsr);
    enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);

    vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

@@ -148,14 +154,9 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr

static void inject_undef64(struct kvm_vcpu *vcpu)
{
    unsigned long cpsr = *vcpu_cpsr(vcpu);
    u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

    vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
    *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

    *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
    vcpu_write_spsr(vcpu, cpsr);
    enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);

    /*
     * Build an unknown exception, depending on the instruction
@@ -131,7 +131,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,

    /*
     * No valid syndrome? Ask userspace for help if it has
     * voluntered to do so, and bail out otherwise.
     * volunteered to do so, and bail out otherwise.
     */
    if (!kvm_vcpu_dabt_isvalid(vcpu)) {
        if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
@ -422,6 +422,9 @@ static void stage2_flush_memslot(struct kvm *kvm,
|
||||
next = stage2_pgd_addr_end(kvm, addr, end);
|
||||
if (!stage2_pgd_none(kvm, *pgd))
|
||||
stage2_flush_puds(kvm, pgd, addr, next);
|
||||
|
||||
if (next != end)
|
||||
cond_resched_lock(&kvm->mmu_lock);
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
@ -784,7 +787,7 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
|
||||
mutex_lock(&kvm_hyp_pgd_mutex);
|
||||
|
||||
/*
|
||||
* This assumes that we we have enough space below the idmap
|
||||
* This assumes that we have enough space below the idmap
|
||||
* page to allocate our VAs. If not, the check below will
|
||||
* kick. A potential alternative would be to detect that
|
||||
* overflow and switch to an allocation above the idmap.
|
||||
@ -964,7 +967,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
|
||||
* stage2_unmap_vm - Unmap Stage-2 RAM mappings
|
||||
* @kvm: The struct kvm pointer
|
||||
*
|
||||
* Go through the memregions and unmap any reguler RAM
|
||||
* Go through the memregions and unmap any regular RAM
|
||||
* backing memory already mapped to the VM.
|
||||
*/
|
||||
void stage2_unmap_vm(struct kvm *kvm)
|
||||
@ -1372,47 +1375,6 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
|
||||
{
|
||||
kvm_pfn_t pfn = *pfnp;
|
||||
gfn_t gfn = *ipap >> PAGE_SHIFT;
|
||||
|
||||
if (kvm_is_transparent_hugepage(pfn)) {
|
||||
unsigned long mask;
|
||||
/*
|
||||
* The address we faulted on is backed by a transparent huge
|
||||
* page. However, because we map the compound huge page and
|
||||
* not the individual tail page, we need to transfer the
|
||||
* refcount to the head page. We have to be careful that the
|
||||
* THP doesn't start to split while we are adjusting the
|
||||
* refcounts.
|
||||
*
|
||||
* We are sure this doesn't happen, because mmu_notifier_retry
|
||||
* was successful and we are holding the mmu_lock, so if this
|
||||
* THP is trying to split, it will be blocked in the mmu
|
||||
* notifier before touching any of the pages, specifically
|
||||
* before being able to call __split_huge_page_refcount().
|
||||
*
|
||||
* We can therefore safely transfer the refcount from PG_tail
|
||||
* to PG_head and switch the pfn from a tail page to the head
|
||||
* page accordingly.
|
||||
*/
|
||||
mask = PTRS_PER_PMD - 1;
|
||||
VM_BUG_ON((gfn & mask) != (pfn & mask));
|
||||
if (pfn & mask) {
|
||||
*ipap &= PMD_MASK;
|
||||
kvm_release_pfn_clean(pfn);
|
||||
pfn &= ~mask;
|
||||
kvm_get_pfn(pfn);
|
||||
*pfnp = pfn;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_wp_ptes - write protect PMD range
|
||||
* @pmd: pointer to pmd entry
|
||||
@ -1607,6 +1569,10 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
|
||||
hva_t uaddr_start, uaddr_end;
|
||||
size_t size;
|
||||
|
||||
/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
|
||||
if (map_size == PAGE_SIZE)
|
||||
return true;
|
||||
|
||||
size = memslot->npages * PAGE_SIZE;
|
||||
|
||||
gpa_start = memslot->base_gfn << PAGE_SHIFT;
|
||||
@ -1626,7 +1592,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
|
||||
* |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
|
||||
* +-----+--------------------+--------------------+---+
|
||||
*
|
||||
* memslot->base_gfn << PAGE_SIZE:
|
||||
* memslot->base_gfn << PAGE_SHIFT:
|
||||
* +---+--------------------+--------------------+-----+
|
||||
* |abc|def Stage-2 block | Stage-2 block |tvxyz|
|
||||
* +---+--------------------+--------------------+-----+
|
||||
@ -1656,6 +1622,59 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
|
||||
(hva & ~(map_size - 1)) + map_size <= uaddr_end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the given hva is backed by a transparent huge page (THP) and
|
||||
* whether it can be mapped using block mapping in stage2. If so, adjust
|
||||
* the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
|
||||
* supported. This will need to be updated to support other THP sizes.
|
||||
*
|
||||
* Returns the size of the mapping.
|
||||
*/
|
||||
static unsigned long
|
||||
transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
|
||||
unsigned long hva, kvm_pfn_t *pfnp,
|
||||
phys_addr_t *ipap)
|
||||
{
|
||||
kvm_pfn_t pfn = *pfnp;
|
||||
|
||||
/*
|
||||
* Make sure the adjustment is done only for THP pages. Also make
|
||||
* sure that the HVA and IPA are sufficiently aligned and that the
|
||||
* block map is contained within the memslot.
|
||||
*/
|
||||
if (kvm_is_transparent_hugepage(pfn) &&
|
||||
fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
|
||||
/*
|
||||
* The address we faulted on is backed by a transparent huge
|
||||
* page. However, because we map the compound huge page and
|
||||
* not the individual tail page, we need to transfer the
|
||||
* refcount to the head page. We have to be careful that the
|
||||
* THP doesn't start to split while we are adjusting the
|
||||
* refcounts.
|
||||
*
|
||||
* We are sure this doesn't happen, because mmu_notifier_retry
|
||||
* was successful and we are holding the mmu_lock, so if this
|
||||
* THP is trying to split, it will be blocked in the mmu
|
||||
* notifier before touching any of the pages, specifically
|
||||
* before being able to call __split_huge_page_refcount().
|
||||
*
|
||||
* We can therefore safely transfer the refcount from PG_tail
|
||||
* to PG_head and switch the pfn from a tail page to the head
|
||||
* page accordingly.
|
||||
*/
|
||||
*ipap &= PMD_MASK;
|
||||
kvm_release_pfn_clean(pfn);
|
||||
pfn &= ~(PTRS_PER_PMD - 1);
|
||||
kvm_get_pfn(pfn);
|
||||
*pfnp = pfn;
|
||||
|
||||
return PMD_SIZE;
|
||||
}
|
||||
|
||||
/* Use page mapping if we cannot use block mapping. */
|
||||
return PAGE_SIZE;
|
||||
}
|
||||
|
||||
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
struct kvm_memory_slot *memslot, unsigned long hva,
|
||||
unsigned long fault_status)
|
||||
@ -1769,20 +1788,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
if (mmu_notifier_retry(kvm, mmu_seq))
|
||||
goto out_unlock;
|
||||
|
||||
if (vma_pagesize == PAGE_SIZE && !force_pte) {
|
||||
/*
|
||||
* Only PMD_SIZE transparent hugepages(THP) are
|
||||
* currently supported. This code will need to be
|
||||
* updated to support other THP sizes.
|
||||
*
|
||||
* Make sure the host VA and the guest IPA are sufficiently
|
||||
* aligned and that the block is contained within the memslot.
|
||||
*/
|
||||
if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
|
||||
transparent_hugepage_adjust(&pfn, &fault_ipa))
|
||||
vma_pagesize = PMD_SIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we are not forced to use page mapping, check if we are
|
||||
* backed by a THP and thus use block mapping if possible.
|
||||
*/
|
||||
if (vma_pagesize == PAGE_SIZE && !force_pte)
|
||||
vma_pagesize = transparent_hugepage_adjust(memslot, hva,
|
||||
&pfn, &fault_ipa);
|
||||
if (writable)
|
||||
kvm_set_pfn_dirty(pfn);
|
||||
|
||||
@@ -2185,11 +2197,11 @@ int kvm_mmu_init(void)
{
	int err;

-	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
+	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
-	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
+	hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
-	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
+	hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
@@ -2262,11 +2274,19 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
{
	/*
	 * At this point memslot has been committed and there is an
-	 * allocated dirty_bitmap[], dirty pages will be be tracked while the
+	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
-	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
-		kvm_mmu_wp_memory_region(kvm, mem->slot);
+	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+		/*
+		 * If we're with initial-all-set, we don't need to write
+		 * protect any pages because they're all reported as dirty.
+		 * Huge pages and normal pages will be write protect gradually.
+		 */
+		if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
+			kvm_mmu_wp_memory_region(kvm, mem->slot);
+		}
+	}
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
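The new branch keys off the manual-protect/initially-set mode that userspace opts into before enabling dirty logging. A minimal userspace sketch of that opt-in (error handling elided; vm_fd is an assumed, already-open VM descriptor):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int enable_dirty_log_initially_set(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			.args = { KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET },
		};

		/* Afterwards a freshly enabled dirty log starts out fully
		 * dirty, and write protection happens lazily through
		 * KVM_CLEAR_DIRTY_LOG rather than all at once here. */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}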
@@ -94,7 +94,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)

	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
-	 * the general puspose registers are undefined upon CPU_ON.
+	 * the general purpose registers are undefined upon CPU_ON.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

@@ -265,10 +265,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
-		 * We should'nt be going back to guest VCPU after
+		 * We shouldn't be going back to guest VCPU after
		 * receiving SYSTEM_OFF request.
		 *
-		 * If user space accidently/deliberately resumes
+		 * If user space accidentally/deliberately resumes
		 * guest VCPU after SYSTEM_OFF request then guest
		 * VCPU should see internal failure from PSCI return
		 * value. To achieve this, we preload r0 (or x0) with
@@ -36,15 +36,11 @@ static u32 kvm_ipa_limit;
/*
 * ARMv8 Reset Values
 */
-static const struct kvm_regs default_regs_reset = {
-	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
-			PSR_F_BIT | PSR_D_BIT),
-};
+#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
+				 PSR_F_BIT | PSR_D_BIT)

-static const struct kvm_regs default_regs_reset32 = {
-	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
-			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
-};
+#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
+				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

/**
 * kvm_arch_vm_ioctl_check_extension
@@ -155,7 +151,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
	vl = vcpu->arch.sve_max_vl;

	/*
-	 * Resposibility for these properties is shared between
+	 * Responsibility for these properties is shared between
	 * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
@@ -241,7 +237,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
- * on the memory-backed valus of system registers, we want to do a full put if
+ * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
@@ -249,9 +245,9 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
-	const struct kvm_regs *cpu_reset;
	int ret = -EINVAL;
	bool loaded;
+	u32 pstate;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);
@@ -282,16 +278,17 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
				goto out;
-			cpu_reset = &default_regs_reset32;
+			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
-			cpu_reset = &default_regs_reset;
+			pstate = VCPU_RESET_PSTATE_EL1;
		}

		break;
	}

	/* Reset core registers */
-	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
+	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+	vcpu_gp_regs(vcpu)->regs.pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);
@@ -388,7 +385,7 @@ int kvm_set_ipa_limit(void)
 *
 * So clamp the ipa limit further down to limit the number of levels.
 * Since we can concatenate upto 16 tables at entry level, we could
- * go upto 4bits above the maximum VA addressible with the current
+ * go upto 4bits above the maximum VA addressable with the current
 * number of levels.
 */
va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;

@@ -34,7 +34,7 @@
#include "trace.h"

/*
- * All of this file is extremly similar to the ARM coproc.c, but the
+ * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
@@ -64,11 +64,8 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu,
	return false;
}

-u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
+static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
-	if (!vcpu->arch.sysregs_loaded_on_cpu)
-		goto immediate_read;
-
	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
@@ -79,75 +76,92 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
-	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
-	case SCTLR_EL1:		return read_sysreg_s(SYS_SCTLR_EL12);
-	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
-	case CPACR_EL1:		return read_sysreg_s(SYS_CPACR_EL12);
-	case TTBR0_EL1:		return read_sysreg_s(SYS_TTBR0_EL12);
-	case TTBR1_EL1:		return read_sysreg_s(SYS_TTBR1_EL12);
-	case TCR_EL1:		return read_sysreg_s(SYS_TCR_EL12);
-	case ESR_EL1:		return read_sysreg_s(SYS_ESR_EL12);
-	case AFSR0_EL1:		return read_sysreg_s(SYS_AFSR0_EL12);
-	case AFSR1_EL1:		return read_sysreg_s(SYS_AFSR1_EL12);
-	case FAR_EL1:		return read_sysreg_s(SYS_FAR_EL12);
-	case MAIR_EL1:		return read_sysreg_s(SYS_MAIR_EL12);
-	case VBAR_EL1:		return read_sysreg_s(SYS_VBAR_EL12);
-	case CONTEXTIDR_EL1:	return read_sysreg_s(SYS_CONTEXTIDR_EL12);
-	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
-	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
-	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
-	case AMAIR_EL1:		return read_sysreg_s(SYS_AMAIR_EL12);
-	case CNTKCTL_EL1:	return read_sysreg_s(SYS_CNTKCTL_EL12);
-	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
-	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
-	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
-	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
+	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
+	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
+	case ACTLR_EL1:		*val = read_sysreg_s(SYS_ACTLR_EL1);	break;
+	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
+	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
+	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
+	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
+	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
+	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
+	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
+	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
+	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
+	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
+	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
+	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
+	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
+	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
+	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
+	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
+	case PAR_EL1:		*val = read_sysreg_s(SYS_PAR_EL1);	break;
+	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
+	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
+	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
+	default:		return false;
	}

-immediate_read:
-	return __vcpu_sys_reg(vcpu, reg);
+	return true;
}

-void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
+static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
-	if (!vcpu->arch.sysregs_loaded_on_cpu)
-		goto immediate_write;
-
	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
-	 * should never be listed below, because the the MPIDR should only be
-	 * set once, before running the VCPU, and never changed later.
+	 * should never be listed below, because the MPIDR should only be set
+	 * once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
-	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
-	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	return;
-	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
-	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	return;
-	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	return;
-	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	return;
-	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	return;
-	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	return;
-	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	return;
-	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	return;
-	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	return;
-	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	return;
-	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	return;
-	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
-	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
-	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
-	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
-	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	return;
-	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	return;
-	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
-	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
-	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
-	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
+	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
+	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
+	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	break;
+	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
+	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
+	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
+	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
+	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
+	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
+	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
+	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
+	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
+	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
+	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
+	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
+	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
+	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
+	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
+	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
+	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
+	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
+	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
+	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
+	default:		return false;
	}

-immediate_write:
+	return true;
}

+u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
+{
+	u64 val = 0x8badf00d8badf00d;
+
+	if (vcpu->arch.sysregs_loaded_on_cpu &&
+	    __vcpu_read_sys_reg_from_cpu(reg, &val))
+		return val;
+
+	return __vcpu_sys_reg(vcpu, reg);
+}
+
+void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
+{
+	if (vcpu->arch.sysregs_loaded_on_cpu &&
+	    __vcpu_write_sys_reg_to_cpu(val, reg))
+		return;
+
+	__vcpu_sys_reg(vcpu, reg) = val;
+}

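The refactor splits register access into a CPU-backed fast path and a memory-backed fallback; the 0x8badf00d poison is purely defensive and never escapes, since a miss in the switch falls through to the in-memory copy. From the emulation side nothing changes: a fragment like the following (SCTLR_ELx_EE is used only as an example bit) behaves identically whether the vcpu's sysregs are loaded on the CPU or not:

	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

	sctlr |= SCTLR_ELx_EE;			/* example modification only */
	vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);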
@@ -1532,7 +1546,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
-	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
+	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1571,8 +1585,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
-	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
+	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
+	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
@@ -2073,12 +2087,37 @@ static const struct sys_reg_desc cp15_64_regs[] = {
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};

+static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
+			      bool is_32)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; i++) {
+		if (!is_32 && table[i].reg && !table[i].reset) {
+			kvm_err("sys_reg table %p entry %d lacks reset\n",
+				table, i);
+			return 1;
+		}
+
+		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
+			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
+	if (check_sysreg_table(table->table64.table, table->table64.num, false) ||
+	    check_sysreg_table(table->table32.table, table->table32.num, true))
+		return;
+
	target_tables[target] = table;
}

@@ -2364,19 +2403,13 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
-				const struct sys_reg_desc *table, size_t num,
-				unsigned long *bmap)
+				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
-		if (table[i].reset) {
-			int reg = table[i].reg;
-
+		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
-			if (reg > 0 && reg < NR_SYS_REGS)
-				set_bit(reg, bmap);
-		}
}

/**
@@ -2832,32 +2865,18 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
	return write_demux_regids(uindices);
}

-static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
-{
-	unsigned int i;
-
-	for (i = 1; i < n; i++) {
-		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
-			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
-	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
-	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
-	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
-	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
-	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
-	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
+	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false));
+	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true));
+	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true));
+	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true));
+	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true));
+	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
@@ -2893,17 +2912,10 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;
-	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };

	/* Generic chip reset first (so target could override). */
-	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
+	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
-	reset_sys_reg_descs(vcpu, table, num, bmap);
-
-	for (num = 1; num < NR_SYS_REGS; num++) {
-		if (WARN(!test_bit(num, bmap),
-			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
-			break;
-	}
+	reset_sys_reg_descs(vcpu, table, num);
}

@@ -1,216 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(_TRACE_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#ifndef _TRACE_ARM64_KVM_H
#define _TRACE_ARM64_KVM_H

-#include <linux/tracepoint.h>
-#include "sys_regs.h"
+#include "trace_arm.h"
+#include "trace_handle_exit.h"

-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kvm
-
-TRACE_EVENT(kvm_wfx_arm64,
-	TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
-	TP_ARGS(vcpu_pc, is_wfe),
-
-	TP_STRUCT__entry(
-		__field(unsigned long, vcpu_pc)
-		__field(bool, is_wfe)
-	),
-
-	TP_fast_assign(
-		__entry->vcpu_pc = vcpu_pc;
-		__entry->is_wfe = is_wfe;
-	),
-
-	TP_printk("guest executed wf%c at: 0x%08lx",
-		  __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_hvc_arm64,
-	TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
-	TP_ARGS(vcpu_pc, r0, imm),
-
-	TP_STRUCT__entry(
-		__field(unsigned long, vcpu_pc)
-		__field(unsigned long, r0)
-		__field(unsigned long, imm)
-	),
-
-	TP_fast_assign(
-		__entry->vcpu_pc = vcpu_pc;
-		__entry->r0 = r0;
-		__entry->imm = imm;
-	),
-
-	TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
-		  __entry->vcpu_pc, __entry->r0, __entry->imm)
-);
-
-TRACE_EVENT(kvm_arm_setup_debug,
-	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
-	TP_ARGS(vcpu, guest_debug),
-
-	TP_STRUCT__entry(
-		__field(struct kvm_vcpu *, vcpu)
-		__field(__u32, guest_debug)
-	),
-
-	TP_fast_assign(
-		__entry->vcpu = vcpu;
-		__entry->guest_debug = guest_debug;
-	),
-
-	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
-);
-
-TRACE_EVENT(kvm_arm_clear_debug,
-	TP_PROTO(__u32 guest_debug),
-	TP_ARGS(guest_debug),
-
-	TP_STRUCT__entry(
-		__field(__u32, guest_debug)
-	),
-
-	TP_fast_assign(
-		__entry->guest_debug = guest_debug;
-	),
-
-	TP_printk("flags: 0x%08x", __entry->guest_debug)
-);
-
-TRACE_EVENT(kvm_arm_set_dreg32,
-	TP_PROTO(const char *name, __u32 value),
-	TP_ARGS(name, value),
-
-	TP_STRUCT__entry(
-		__field(const char *, name)
-		__field(__u32, value)
-	),
-
-	TP_fast_assign(
-		__entry->name = name;
-		__entry->value = value;
-	),
-
-	TP_printk("%s: 0x%08x", __entry->name, __entry->value)
-);
-
-TRACE_DEFINE_SIZEOF(__u64);
-
-TRACE_EVENT(kvm_arm_set_regset,
-	TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
-	TP_ARGS(type, len, control, value),
-	TP_STRUCT__entry(
-		__field(const char *, name)
-		__field(int, len)
-		__array(u64, ctrls, 16)
-		__array(u64, values, 16)
-	),
-	TP_fast_assign(
-		__entry->name = type;
-		__entry->len = len;
-		memcpy(__entry->ctrls, control, len << 3);
-		memcpy(__entry->values, value, len << 3);
-	),
-	TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
-		  __print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
-		  __print_array(__entry->values, __entry->len, sizeof(__u64)))
-);
-
-TRACE_EVENT(trap_reg,
-	TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
-	TP_ARGS(fn, reg, is_write, write_value),
-
-	TP_STRUCT__entry(
-		__field(const char *, fn)
-		__field(int, reg)
-		__field(bool, is_write)
-		__field(u64, write_value)
-	),
-
-	TP_fast_assign(
-		__entry->fn = fn;
-		__entry->reg = reg;
-		__entry->is_write = is_write;
-		__entry->write_value = write_value;
-	),
-
-	TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write ? "write to" : "read from", __entry->reg, __entry->write_value)
-);
-
-TRACE_EVENT(kvm_handle_sys_reg,
-	TP_PROTO(unsigned long hsr),
-	TP_ARGS(hsr),
-
-	TP_STRUCT__entry(
-		__field(unsigned long, hsr)
-	),
-
-	TP_fast_assign(
-		__entry->hsr = hsr;
-	),
-
-	TP_printk("HSR 0x%08lx", __entry->hsr)
-);
-
-TRACE_EVENT(kvm_sys_access,
-	TP_PROTO(unsigned long vcpu_pc, struct sys_reg_params *params, const struct sys_reg_desc *reg),
-	TP_ARGS(vcpu_pc, params, reg),
-
-	TP_STRUCT__entry(
-		__field(unsigned long, vcpu_pc)
-		__field(bool, is_write)
-		__field(const char *, name)
-		__field(u8, Op0)
-		__field(u8, Op1)
-		__field(u8, CRn)
-		__field(u8, CRm)
-		__field(u8, Op2)
-	),
-
-	TP_fast_assign(
-		__entry->vcpu_pc = vcpu_pc;
-		__entry->is_write = params->is_write;
-		__entry->name = reg->name;
-		__entry->Op0 = reg->Op0;
-		__entry->Op0 = reg->Op0;
-		__entry->Op1 = reg->Op1;
-		__entry->CRn = reg->CRn;
-		__entry->CRm = reg->CRm;
-		__entry->Op2 = reg->Op2;
-	),
-
-	TP_printk("PC: %lx %s (%d,%d,%d,%d,%d) %s",
-		  __entry->vcpu_pc, __entry->name ?: "UNKN",
-		  __entry->Op0, __entry->Op1, __entry->CRn,
-		  __entry->CRm, __entry->Op2,
-		  __entry->is_write ? "write" : "read")
-);
-
-TRACE_EVENT(kvm_set_guest_debug,
-	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
-	TP_ARGS(vcpu, guest_debug),
-
-	TP_STRUCT__entry(
-		__field(struct kvm_vcpu *, vcpu)
-		__field(__u32, guest_debug)
-	),
-
-	TP_fast_assign(
-		__entry->vcpu = vcpu;
-		__entry->guest_debug = guest_debug;
-	),
-
-	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
-);
-
-
-#endif /* _TRACE_ARM64_KVM_H */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
+
+#endif /* _TRACE_ARM64_KVM_H */

@@ -1,10 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_H
+#if !defined(_TRACE_ARM_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_ARM64_KVM_H

#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>
-#include <asm/kvm_arm.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
@@ -368,12 +367,12 @@ TRACE_EVENT(kvm_timer_emulate,
		  __entry->timer_idx, __entry->should_fire)
);

-#endif /* _TRACE_KVM_H */
+#endif /* _TRACE_ARM_ARM64_KVM_H */

#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../virt/kvm/arm
+#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
+#define TRACE_INCLUDE_FILE trace_arm

/* This part must be outside protection */
#include <trace/define_trace.h>
arch/arm64/kvm/trace_handle_exit.h (new file, 215 lines)
@@ -0,0 +1,215 @@
/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_HANDLE_EXIT_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HANDLE_EXIT_ARM64_KVM_H

#include <linux/tracepoint.h>
#include "sys_regs.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

TRACE_EVENT(kvm_wfx_arm64,
	TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
	TP_ARGS(vcpu_pc, is_wfe),

	TP_STRUCT__entry(
		__field(unsigned long, vcpu_pc)
		__field(bool, is_wfe)
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->is_wfe = is_wfe;
	),

	TP_printk("guest executed wf%c at: 0x%08lx",
		  __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);

TRACE_EVENT(kvm_hvc_arm64,
	TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
	TP_ARGS(vcpu_pc, r0, imm),

	TP_STRUCT__entry(
		__field(unsigned long, vcpu_pc)
		__field(unsigned long, r0)
		__field(unsigned long, imm)
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->r0 = r0;
		__entry->imm = imm;
	),

	TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
		  __entry->vcpu_pc, __entry->r0, __entry->imm)
);

TRACE_EVENT(kvm_arm_setup_debug,
	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
	TP_ARGS(vcpu, guest_debug),

	TP_STRUCT__entry(
		__field(struct kvm_vcpu *, vcpu)
		__field(__u32, guest_debug)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->guest_debug = guest_debug;
	),

	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
);

TRACE_EVENT(kvm_arm_clear_debug,
	TP_PROTO(__u32 guest_debug),
	TP_ARGS(guest_debug),

	TP_STRUCT__entry(
		__field(__u32, guest_debug)
	),

	TP_fast_assign(
		__entry->guest_debug = guest_debug;
	),

	TP_printk("flags: 0x%08x", __entry->guest_debug)
);

TRACE_EVENT(kvm_arm_set_dreg32,
	TP_PROTO(const char *name, __u32 value),
	TP_ARGS(name, value),

	TP_STRUCT__entry(
		__field(const char *, name)
		__field(__u32, value)
	),

	TP_fast_assign(
		__entry->name = name;
		__entry->value = value;
	),

	TP_printk("%s: 0x%08x", __entry->name, __entry->value)
);

TRACE_DEFINE_SIZEOF(__u64);

TRACE_EVENT(kvm_arm_set_regset,
	TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
	TP_ARGS(type, len, control, value),
	TP_STRUCT__entry(
		__field(const char *, name)
		__field(int, len)
		__array(u64, ctrls, 16)
		__array(u64, values, 16)
	),
	TP_fast_assign(
		__entry->name = type;
		__entry->len = len;
		memcpy(__entry->ctrls, control, len << 3);
		memcpy(__entry->values, value, len << 3);
	),
	TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
		  __print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
		  __print_array(__entry->values, __entry->len, sizeof(__u64)))
);

TRACE_EVENT(trap_reg,
	TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
	TP_ARGS(fn, reg, is_write, write_value),

	TP_STRUCT__entry(
		__field(const char *, fn)
		__field(int, reg)
		__field(bool, is_write)
		__field(u64, write_value)
	),

	TP_fast_assign(
		__entry->fn = fn;
		__entry->reg = reg;
		__entry->is_write = is_write;
		__entry->write_value = write_value;
	),

	TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write ? "write to" : "read from", __entry->reg, __entry->write_value)
);

TRACE_EVENT(kvm_handle_sys_reg,
	TP_PROTO(unsigned long hsr),
	TP_ARGS(hsr),

	TP_STRUCT__entry(
		__field(unsigned long, hsr)
	),

	TP_fast_assign(
		__entry->hsr = hsr;
	),

	TP_printk("HSR 0x%08lx", __entry->hsr)
);

TRACE_EVENT(kvm_sys_access,
	TP_PROTO(unsigned long vcpu_pc, struct sys_reg_params *params, const struct sys_reg_desc *reg),
	TP_ARGS(vcpu_pc, params, reg),

	TP_STRUCT__entry(
		__field(unsigned long, vcpu_pc)
		__field(bool, is_write)
		__field(const char *, name)
		__field(u8, Op0)
		__field(u8, Op1)
		__field(u8, CRn)
		__field(u8, CRm)
		__field(u8, Op2)
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->is_write = params->is_write;
		__entry->name = reg->name;
		__entry->Op0 = reg->Op0;
		__entry->Op0 = reg->Op0;
		__entry->Op1 = reg->Op1;
		__entry->CRn = reg->CRn;
		__entry->CRm = reg->CRm;
		__entry->Op2 = reg->Op2;
	),

	TP_printk("PC: %lx %s (%d,%d,%d,%d,%d) %s",
		  __entry->vcpu_pc, __entry->name ?: "UNKN",
		  __entry->Op0, __entry->Op1, __entry->CRn,
		  __entry->CRm, __entry->Op2,
		  __entry->is_write ? "write" : "read")
);

TRACE_EVENT(kvm_set_guest_debug,
	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
	TP_ARGS(vcpu, guest_debug),

	TP_STRUCT__entry(
		__field(struct kvm_vcpu *, vcpu)
		__field(__u32, guest_debug)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->guest_debug = guest_debug;
	),

	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
);

#endif /* _TRACE_HANDLE_EXIT_ARM64_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_handle_exit

/* This part must be outside protection */
#include <trace/define_trace.h>
@@ -7,7 +7,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
-#include "vgic.h"
+#include "vgic/vgic.h"
#include "sys_regs.h"

static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -30,7 +30,7 @@ TRACE_EVENT(vgic_update_irq_pending,
#endif /* _TRACE_VGIC_H */

#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kvm/vgic
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
@@ -56,7 +56,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

-	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+	for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;
@@ -120,7 +120,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
		vgic_put_irq(vcpu->kvm, irq);
	}

-	vgic_cpu->used_lrs = 0;
+	cpuif->used_lrs = 0;
}

/*
@@ -427,7 +427,7 @@ out:
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = cpu_if->used_lrs;
	u64 elrsr;
	int i;

@@ -448,7 +448,7 @@ static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;

	if (!base)
		return;
@@ -463,7 +463,7 @@ void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (!base)
@@ -39,7 +39,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

-	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
@@ -111,7 +111,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
		vgic_put_irq(vcpu->kvm, irq);
	}

-	vgic_cpu->used_lrs = 0;
+	cpuif->used_lrs = 0;
}

/* Requires the irq to be locked already */
@@ -587,7 +587,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
	int ret;

	/*
-	 * The ListRegs field is 5 bits, but there is a architectural
+	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
@@ -630,12 +630,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

-#ifdef CONFIG_ARM64
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
-#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
@@ -664,10 +662,10 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

-	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
+	kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));

	if (has_vhe())
-		__vgic_v3_activate_traps(vcpu);
+		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}
@@ -682,12 +680,14 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

-	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+	kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));

	if (has_vhe())
-		__vgic_v3_deactivate_traps(vcpu);
+		__vgic_v3_deactivate_traps(cpu_if);
}
@@ -786,6 +786,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
	int count;
	bool multi_sgi;
	u8 prio = 0xff;
+	int i = 0;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

@@ -827,11 +828,14 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
		}
	}

-	vcpu->arch.vgic_cpu.used_lrs = count;
-
	/* Nuke remaining LRs */
-	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
-		vgic_clear_lr(vcpu, count);
+	for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+		vgic_clear_lr(vcpu, i);
+
+	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
+	else
+		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}

static inline bool can_access_vgic_from_kernel(void)
@@ -849,13 +853,13 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
-		__vgic_v3_save_state(vcpu);
+		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	int used_lrs;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
@@ -864,7 +868,12 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

-	if (vgic_cpu->used_lrs)
+	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
+	else
+		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+
+	if (used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}
@@ -874,7 +883,7 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
-		__vgic_v3_restore_state(vcpu);
+		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
@@ -174,6 +174,8 @@ struct kvm_vcpu_stat {
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
+	u64 halt_poll_success_ns;
+	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

@@ -39,40 +39,41 @@
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
-	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
-	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
-	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
-	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
-	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
-	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
-	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
-	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
-	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
-	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
-	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
-	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
-	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
-	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
-	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
-	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
-	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	VCPU_STAT("wait", wait_exits),
+	VCPU_STAT("cache", cache_exits),
+	VCPU_STAT("signal", signal_exits),
+	VCPU_STAT("interrupt", int_exits),
+	VCPU_STAT("cop_unusable", cop_unusable_exits),
+	VCPU_STAT("tlbmod", tlbmod_exits),
+	VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
+	VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
+	VCPU_STAT("addrerr_st", addrerr_st_exits),
+	VCPU_STAT("addrerr_ld", addrerr_ld_exits),
+	VCPU_STAT("syscall", syscall_exits),
+	VCPU_STAT("resvd_inst", resvd_inst_exits),
+	VCPU_STAT("break_inst", break_inst_exits),
+	VCPU_STAT("trap_inst", trap_inst_exits),
+	VCPU_STAT("msa_fpe", msa_fpe_exits),
+	VCPU_STAT("fpe", fpe_exits),
+	VCPU_STAT("msa_disabled", msa_disabled_exits),
+	VCPU_STAT("flush_dcache", flush_dcache_exits),
#ifdef CONFIG_KVM_MIPS_VZ
-	{ "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
-	{ "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
-	{ "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
-	{ "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
-	{ "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
-	{ "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
-	{ "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
-	{ "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
+	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
+	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
+	VCPU_STAT("vz_hc", vz_hc_exits),
+	VCPU_STAT("vz_grr", vz_grr_exits),
+	VCPU_STAT("vz_gva", vz_gva_exits),
+	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
+	VCPU_STAT("vz_gpa", vz_gpa_exits),
+	VCPU_STAT("vz_resvd", vz_resvd_exits),
#endif
-	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
-	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
-	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
+	VCPU_STAT("halt_successful_poll", halt_successful_poll),
+	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+	VCPU_STAT("halt_wakeup", halt_wakeup),
+	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{NULL}
};

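All of the converted tables (here and in the PPC and s390 hunks below) rely on name-carrying VM_STAT()/VCPU_STAT() macros that are now defined once in common KVM code instead of per architecture. Their shape is roughly the following; treat the exact layout as an assumption rather than a quotation:

	#define VM_STAT(n, x, ...) \
		{ n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
	#define VCPU_STAT(n, x, ...) \
		{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }

	/* So the MIPS entry above expands to the initializer it replaced:
	 * VCPU_STAT("wait", wait_exits)
	 *   -> { "wait", offsetof(struct kvm_vcpu, stat.wait_exits),
	 *        KVM_STAT_VCPU }
	 */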
@@ -284,8 +285,7 @@ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
-	if (swq_has_sleeper(&vcpu->wq))
-		swake_up_one(&vcpu->wq);
+	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}
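The swait-to-rcuwait conversions in this and the following hunks all follow one pattern: there is exactly one possible waiter (the vCPU thread), so the wake side can report whether anyone was actually woken. A condensed sketch of the pairing; the wait condition and stat field are borrowed from the surrounding code, and the three-argument rcuwait_wait_event() form is assumed from this series:

	#include <linux/rcuwait.h>
	#include <linux/sched.h>

	/* Waiter side: the vCPU thread is the only task allowed to block. */
	static void vcpu_block_sketch(struct kvm_vcpu *vcpu, struct rcuwait *w)
	{
		rcuwait_wait_event(w, !READ_ONCE(vcpu->arch.wait),
				   TASK_INTERRUPTIBLE);
	}

	/* Waker side: the return value says whether a task was woken,
	 * which is what lets callers bump halt_wakeup accurately. */
	static void vcpu_kick_sketch(struct kvm_vcpu *vcpu, struct rcuwait *w)
	{
		if (rcuwait_wake_up(w))
			++vcpu->stat.halt_wakeup;
	}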
@@ -439,8 +439,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
	return -ENOIOCTLCMD;
}

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
+	struct kvm_run *run = vcpu->run;
	int r = -EINTR;

	vcpu_load(vcpu);
@@ -511,8 +512,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,

	dvcpu->arch.wait = 0;

-	if (swq_has_sleeper(&dvcpu->wq))
-		swake_up_one(&dvcpu->wq);
+	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}

@@ -78,7 +78,7 @@ struct kvmppc_vcore {
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
-	struct swait_queue_head wq;
+	struct rcuwait wait;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;

@@ -751,7 +751,7 @@ struct kvm_vcpu_arch {
	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
	u32 last_inst;

-	struct swait_queue_head *wqp;
+	struct rcuwait *waitp;
	struct kvmppc_vcore *vcore;
	int ret;
	int trap;

@@ -36,41 +36,38 @@
#include "book3s.h"
#include "trace.h"

-#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
-#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
-
/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "exits", VCPU_STAT(sum_exits) },
-	{ "mmio", VCPU_STAT(mmio_exits) },
-	{ "sig", VCPU_STAT(signal_exits) },
-	{ "sysc", VCPU_STAT(syscall_exits) },
-	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
-	{ "dec", VCPU_STAT(dec_exits) },
-	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
-	{ "queue_intr", VCPU_STAT(queue_intr) },
-	{ "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
-	{ "halt_poll_fail_ns", VCPU_STAT(halt_poll_fail_ns) },
-	{ "halt_wait_ns", VCPU_STAT(halt_wait_ns) },
-	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
-	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
-	{ "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
-	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
-	{ "pf_storage", VCPU_STAT(pf_storage) },
-	{ "sp_storage", VCPU_STAT(sp_storage) },
-	{ "pf_instruc", VCPU_STAT(pf_instruc) },
-	{ "sp_instruc", VCPU_STAT(sp_instruc) },
-	{ "ld", VCPU_STAT(ld) },
-	{ "ld_slow", VCPU_STAT(ld_slow) },
-	{ "st", VCPU_STAT(st) },
-	{ "st_slow", VCPU_STAT(st_slow) },
-	{ "pthru_all", VCPU_STAT(pthru_all) },
-	{ "pthru_host", VCPU_STAT(pthru_host) },
-	{ "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
-	{ "largepages_2M", VM_STAT(num_2M_pages, .mode = 0444) },
-	{ "largepages_1G", VM_STAT(num_1G_pages, .mode = 0444) },
+	VCPU_STAT("exits", sum_exits),
+	VCPU_STAT("mmio", mmio_exits),
+	VCPU_STAT("sig", signal_exits),
+	VCPU_STAT("sysc", syscall_exits),
+	VCPU_STAT("inst_emu", emulated_inst_exits),
+	VCPU_STAT("dec", dec_exits),
+	VCPU_STAT("ext_intr", ext_intr_exits),
+	VCPU_STAT("queue_intr", queue_intr),
+	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+	VCPU_STAT("halt_wait_ns", halt_wait_ns),
+	VCPU_STAT("halt_successful_poll", halt_successful_poll),
+	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+	VCPU_STAT("halt_successful_wait", halt_successful_wait),
+	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+	VCPU_STAT("halt_wakeup", halt_wakeup),
+	VCPU_STAT("pf_storage", pf_storage),
+	VCPU_STAT("sp_storage", sp_storage),
+	VCPU_STAT("pf_instruc", pf_instruc),
+	VCPU_STAT("sp_instruc", sp_instruc),
+	VCPU_STAT("ld", ld),
+	VCPU_STAT("ld_slow", ld_slow),
+	VCPU_STAT("st", st),
+	VCPU_STAT("st_slow", st_slow),
+	VCPU_STAT("pthru_all", pthru_all),
+	VCPU_STAT("pthru_host", pthru_host),
+	VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
+	VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
+	VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
	{ NULL }
};

@@ -230,13 +230,11 @@ static bool kvmppc_ipi_thread(int cpu)
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
-	struct swait_queue_head *wqp;
+	struct rcuwait *waitp;

-	wqp = kvm_arch_vcpu_wq(vcpu);
-	if (swq_has_sleeper(wqp)) {
-		swake_up_one(wqp);
+	waitp = kvm_arch_vcpu_get_wait(vcpu);
+	if (rcuwait_wake_up(waitp))
		++vcpu->stat.halt_wakeup;
-	}

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
@@ -2125,7 +2123,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)

	spin_lock_init(&vcore->lock);
	spin_lock_init(&vcore->stoltb_lock);
-	init_swait_queue_head(&vcore->wq);
+	rcuwait_init(&vcore->wait);
	vcore->preempt_tb = TB_NIL;
	vcore->lpcr = kvm->arch.lpcr;
	vcore->first_vcpuid = id;
@@ -3784,7 +3782,6 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
	ktime_t cur, start_poll, start_wait;
	int do_sleep = 1;
	u64 block_ns;
-	DECLARE_SWAITQUEUE(wait);

	/* Poll for pending exceptions and ceded state */
	cur = start_poll = ktime_get();
@@ -3812,10 +3809,10 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
		}
	}

-	prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
-
+	prepare_to_rcuwait(&vc->wait);
+	set_current_state(TASK_INTERRUPTIBLE);
	if (kvmppc_vcore_check_block(vc)) {
-		finish_swait(&vc->wq, &wait);
+		finish_rcuwait(&vc->wait);
		do_sleep = 0;
		/* If we polled, count this as a successful poll */
		if (vc->halt_poll_ns)
@@ -3829,7 +3826,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
	trace_kvmppc_vcore_blocked(vc, 0);
	spin_unlock(&vc->lock);
	schedule();
-	finish_swait(&vc->wq, &wait);
+	finish_rcuwait(&vc->wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
	trace_kvmppc_vcore_blocked(vc, 1);
@@ -3940,7 +3937,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
		kvmppc_start_thread(vcpu, vc);
		trace_kvm_guest_enter(vcpu);
	} else if (vc->vcore_state == VCORE_SLEEPING) {
-		swake_up_one(&vc->wq);
+		rcuwait_wake_up(&vc->wait);
	}

}
@@ -4279,7 +4276,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
	}
	user_vrsave = mfspr(SPRN_VRSAVE);

-	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
+	vcpu->arch.waitp = &vcpu->arch.vcore->wait;
	vcpu->arch.pgdir = kvm->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

@@ -35,29 +35,28 @@

unsigned long kvmppc_booke_handlers;

-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
-
struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "mmio", VCPU_STAT(mmio_exits) },
-	{ "sig", VCPU_STAT(signal_exits) },
-	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
-	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
-	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
-	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
-	{ "sysc", VCPU_STAT(syscall_exits) },
-	{ "isi", VCPU_STAT(isi_exits) },
-	{ "dsi", VCPU_STAT(dsi_exits) },
-	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
-	{ "dec", VCPU_STAT(dec_exits) },
-	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
-	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
-	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
-	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
-	{ "doorbell", VCPU_STAT(dbell_exits) },
-	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
-	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
+	VCPU_STAT("mmio", mmio_exits),
+	VCPU_STAT("sig", signal_exits),
+	VCPU_STAT("itlb_r", itlb_real_miss_exits),
+	VCPU_STAT("itlb_v", itlb_virt_miss_exits),
+	VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
+	VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
+	VCPU_STAT("sysc", syscall_exits),
+	VCPU_STAT("isi", isi_exits),
+	VCPU_STAT("dsi", dsi_exits),
+	VCPU_STAT("inst_emu", emulated_inst_exits),
+	VCPU_STAT("dec", dec_exits),
+	VCPU_STAT("ext_intr", ext_intr_exits),
+	VCPU_STAT("halt_successful_poll", halt_successful_poll),
+	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+	VCPU_STAT("halt_wakeup", halt_wakeup),
+	VCPU_STAT("doorbell", dbell_exits),
+	VCPU_STAT("guest doorbell", gdbell_exits),
+	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+	VM_STAT("remote_tlb_flush", remote_tlb_flush),
	{ NULL }
};

@@ -752,7 +752,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
	if (err)
		goto out_vcpu_uninit;

-	vcpu->arch.wqp = &vcpu->wq;
+	vcpu->arch.waitp = &vcpu->wait;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

@@ -1765,8 +1765,9 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
	return r;
}

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
+	struct kvm_run *run = vcpu->run;
	int r;

	vcpu_load(vcpu);

@@ -375,6 +375,8 @@ struct kvm_vcpu_stat {
	u64 halt_poll_invalid;
	u64 halt_no_poll_steal;
	u64 halt_wakeup;
+	u64 halt_poll_success_ns;
+	u64 halt_poll_fail_ns;
	u64 instruction_lctl;
	u64 instruction_lctlg;
	u64 instruction_stctl;
@@ -971,7 +973,7 @@ struct kvm_arch_async_pf {
	unsigned long pfault_token;
};

-bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
@@ -982,6 +984,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);

+static inline void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) {}
+
void kvm_arch_crypto_clear_masks(struct kvm *kvm);
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm);
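The rename makes the host-side async-PF flow explicit: "page ready" events are queued first and dequeued for delivery later, and kvm_arch_async_page_present_queued() is the notification hook for the queueing step, stubbed out here because s390 does not use interrupt-based delivery. A hypothetical common-code call site, for illustration only (field and lock names assumed from the generic async-PF code):

	/* After the worker has faulted the page in: */
	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	/* Tell the architecture a 'page ready' event is now pending;
	 * a no-op on architectures like s390 above. */
	kvm_arch_async_page_present_queued(vcpu);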
@@ -3082,7 +3082,7 @@ static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
		__airqs_kick_single_vcpu(kvm, pending_mask);
		hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
		return HRTIMER_RESTART;
-	};
+	}

	return HRTIMER_NORESTART;
}
@@ -57,110 +57,109 @@
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
                           (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
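
The table below mixes the old s390-local entry form { "name", VCPU_STAT(field) } with the new common form VCPU_STAT("name", field). A minimal sketch of what the common replacement macros plausibly expand to (an assumption based on the generic kvm_host definitions, which are not shown in this diff):

    /* Sketch only: the generic macros take the name as an argument and
     * expand to a whole debugfs item initializer, roughly: */
    #define VCPU_STAT(n, x, ...) \
        { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }
    #define VM_STAT(n, x, ...) \
        { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }

so VCPU_STAT("exit_null", exit_null) yields the same triple that the old { "exit_null", VCPU_STAT(exit_null) } spelled out by hand.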

struct kvm_stats_debugfs_item debugfs_entries[] = {
    { "userspace_handled", VCPU_STAT(exit_userspace) },
    { "exit_null", VCPU_STAT(exit_null) },
    { "exit_validity", VCPU_STAT(exit_validity) },
    { "exit_stop_request", VCPU_STAT(exit_stop_request) },
    { "exit_external_request", VCPU_STAT(exit_external_request) },
    { "exit_io_request", VCPU_STAT(exit_io_request) },
    { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
    { "exit_instruction", VCPU_STAT(exit_instruction) },
    { "exit_pei", VCPU_STAT(exit_pei) },
    { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
    { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
    { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
    { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
    { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
    { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
    { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
    { "halt_wakeup", VCPU_STAT(halt_wakeup) },
    { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
    { "instruction_lctl", VCPU_STAT(instruction_lctl) },
    { "instruction_stctl", VCPU_STAT(instruction_stctl) },
    { "instruction_stctg", VCPU_STAT(instruction_stctg) },
    { "deliver_ckc", VCPU_STAT(deliver_ckc) },
    { "deliver_cputm", VCPU_STAT(deliver_cputm) },
    { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
    { "deliver_external_call", VCPU_STAT(deliver_external_call) },
    { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
    { "deliver_virtio", VCPU_STAT(deliver_virtio) },
    { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
    { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
    { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
    { "deliver_program", VCPU_STAT(deliver_program) },
    { "deliver_io", VCPU_STAT(deliver_io) },
    { "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
    { "exit_wait_state", VCPU_STAT(exit_wait_state) },
    { "inject_ckc", VCPU_STAT(inject_ckc) },
    { "inject_cputm", VCPU_STAT(inject_cputm) },
    { "inject_external_call", VCPU_STAT(inject_external_call) },
    { "inject_float_mchk", VM_STAT(inject_float_mchk) },
    { "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
    { "inject_io", VM_STAT(inject_io) },
    { "inject_mchk", VCPU_STAT(inject_mchk) },
    { "inject_pfault_done", VM_STAT(inject_pfault_done) },
    { "inject_program", VCPU_STAT(inject_program) },
    { "inject_restart", VCPU_STAT(inject_restart) },
    { "inject_service_signal", VM_STAT(inject_service_signal) },
    { "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
    { "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
    { "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
    { "inject_virtio", VM_STAT(inject_virtio) },
    { "instruction_epsw", VCPU_STAT(instruction_epsw) },
    { "instruction_gs", VCPU_STAT(instruction_gs) },
    { "instruction_io_other", VCPU_STAT(instruction_io_other) },
    { "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
    { "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
    { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
    { "instruction_ptff", VCPU_STAT(instruction_ptff) },
    { "instruction_stidp", VCPU_STAT(instruction_stidp) },
    { "instruction_sck", VCPU_STAT(instruction_sck) },
    { "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
    { "instruction_spx", VCPU_STAT(instruction_spx) },
    { "instruction_stpx", VCPU_STAT(instruction_stpx) },
    { "instruction_stap", VCPU_STAT(instruction_stap) },
    { "instruction_iske", VCPU_STAT(instruction_iske) },
    { "instruction_ri", VCPU_STAT(instruction_ri) },
    { "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
    { "instruction_sske", VCPU_STAT(instruction_sske) },
    { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
    { "instruction_essa", VCPU_STAT(instruction_essa) },
    { "instruction_stsi", VCPU_STAT(instruction_stsi) },
    { "instruction_stfl", VCPU_STAT(instruction_stfl) },
    { "instruction_tb", VCPU_STAT(instruction_tb) },
    { "instruction_tpi", VCPU_STAT(instruction_tpi) },
    { "instruction_tprot", VCPU_STAT(instruction_tprot) },
    { "instruction_tsch", VCPU_STAT(instruction_tsch) },
    { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
    { "instruction_sie", VCPU_STAT(instruction_sie) },
    { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
    { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
    { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
    { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
    { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
    { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
    { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
    { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
    { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
    { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
    { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
    { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
    { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
    { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
    { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
    { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
    { "instruction_diag_10", VCPU_STAT(diagnose_10) },
    { "instruction_diag_44", VCPU_STAT(diagnose_44) },
    { "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
    { "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
    { "instruction_diag_258", VCPU_STAT(diagnose_258) },
    { "instruction_diag_308", VCPU_STAT(diagnose_308) },
    { "instruction_diag_500", VCPU_STAT(diagnose_500) },
    { "instruction_diag_other", VCPU_STAT(diagnose_other) },
    VCPU_STAT("userspace_handled", exit_userspace),
    VCPU_STAT("exit_null", exit_null),
    VCPU_STAT("exit_validity", exit_validity),
    VCPU_STAT("exit_stop_request", exit_stop_request),
    VCPU_STAT("exit_external_request", exit_external_request),
    VCPU_STAT("exit_io_request", exit_io_request),
    VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
    VCPU_STAT("exit_instruction", exit_instruction),
    VCPU_STAT("exit_pei", exit_pei),
    VCPU_STAT("exit_program_interruption", exit_program_interruption),
    VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
    VCPU_STAT("exit_operation_exception", exit_operation_exception),
    VCPU_STAT("halt_successful_poll", halt_successful_poll),
    VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
    VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
    VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
    VCPU_STAT("halt_wakeup", halt_wakeup),
    VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
    VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
    VCPU_STAT("instruction_lctlg", instruction_lctlg),
    VCPU_STAT("instruction_lctl", instruction_lctl),
    VCPU_STAT("instruction_stctl", instruction_stctl),
    VCPU_STAT("instruction_stctg", instruction_stctg),
    VCPU_STAT("deliver_ckc", deliver_ckc),
    VCPU_STAT("deliver_cputm", deliver_cputm),
    VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
    VCPU_STAT("deliver_external_call", deliver_external_call),
    VCPU_STAT("deliver_service_signal", deliver_service_signal),
    VCPU_STAT("deliver_virtio", deliver_virtio),
    VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
    VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
    VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
    VCPU_STAT("deliver_program", deliver_program),
    VCPU_STAT("deliver_io", deliver_io),
    VCPU_STAT("deliver_machine_check", deliver_machine_check),
    VCPU_STAT("exit_wait_state", exit_wait_state),
    VCPU_STAT("inject_ckc", inject_ckc),
    VCPU_STAT("inject_cputm", inject_cputm),
    VCPU_STAT("inject_external_call", inject_external_call),
    VM_STAT("inject_float_mchk", inject_float_mchk),
    VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
    VM_STAT("inject_io", inject_io),
    VCPU_STAT("inject_mchk", inject_mchk),
    VM_STAT("inject_pfault_done", inject_pfault_done),
    VCPU_STAT("inject_program", inject_program),
    VCPU_STAT("inject_restart", inject_restart),
    VM_STAT("inject_service_signal", inject_service_signal),
    VCPU_STAT("inject_set_prefix", inject_set_prefix),
    VCPU_STAT("inject_stop_signal", inject_stop_signal),
    VCPU_STAT("inject_pfault_init", inject_pfault_init),
    VM_STAT("inject_virtio", inject_virtio),
    VCPU_STAT("instruction_epsw", instruction_epsw),
    VCPU_STAT("instruction_gs", instruction_gs),
    VCPU_STAT("instruction_io_other", instruction_io_other),
    VCPU_STAT("instruction_lpsw", instruction_lpsw),
    VCPU_STAT("instruction_lpswe", instruction_lpswe),
    VCPU_STAT("instruction_pfmf", instruction_pfmf),
    VCPU_STAT("instruction_ptff", instruction_ptff),
    VCPU_STAT("instruction_stidp", instruction_stidp),
    VCPU_STAT("instruction_sck", instruction_sck),
    VCPU_STAT("instruction_sckpf", instruction_sckpf),
    VCPU_STAT("instruction_spx", instruction_spx),
    VCPU_STAT("instruction_stpx", instruction_stpx),
    VCPU_STAT("instruction_stap", instruction_stap),
    VCPU_STAT("instruction_iske", instruction_iske),
    VCPU_STAT("instruction_ri", instruction_ri),
    VCPU_STAT("instruction_rrbe", instruction_rrbe),
    VCPU_STAT("instruction_sske", instruction_sske),
    VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
    VCPU_STAT("instruction_essa", instruction_essa),
    VCPU_STAT("instruction_stsi", instruction_stsi),
    VCPU_STAT("instruction_stfl", instruction_stfl),
    VCPU_STAT("instruction_tb", instruction_tb),
    VCPU_STAT("instruction_tpi", instruction_tpi),
    VCPU_STAT("instruction_tprot", instruction_tprot),
    VCPU_STAT("instruction_tsch", instruction_tsch),
    VCPU_STAT("instruction_sthyi", instruction_sthyi),
    VCPU_STAT("instruction_sie", instruction_sie),
    VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
    VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
    VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
    VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
    VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
    VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
    VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
    VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
    VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
    VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
    VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
    VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
    VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
    VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
    VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
    VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
    VCPU_STAT("instruction_diag_10", diagnose_10),
    VCPU_STAT("instruction_diag_44", diagnose_44),
    VCPU_STAT("instruction_diag_9c", diagnose_9c),
    VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
    VCPU_STAT("instruction_diag_258", diagnose_258),
    VCPU_STAT("instruction_diag_308", diagnose_308),
    VCPU_STAT("instruction_diag_500", diagnose_500),
    VCPU_STAT("instruction_diag_other", diagnose_other),
    { NULL }
};

@@ -3944,7 +3943,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
    /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
    /*
     * s390 will always inject the page directly,

@@ -4337,8 +4336,9 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        store_regs_fmt2(vcpu, kvm_run);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
    struct kvm_run *kvm_run = vcpu->run;
    int rc;

    if (kvm_run->immediate_exit)

@@ -1000,8 +1000,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)

    handle_last_fault(vcpu, vsie_page);

    if (need_resched())
        schedule();
    if (test_cpu_flag(CIF_MCCK_PENDING))
        s390_handle_mcck();

@@ -1185,6 +1183,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
            kvm_s390_vcpu_has_irq(vcpu, 0) ||
            kvm_s390_vcpu_sie_inhibited(vcpu))
            break;
        cond_resched();
    }

    if (rc == -EFAULT) {

@@ -788,19 +788,19 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
                                             unsigned long gaddr, int level)
{
    const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
    unsigned long *table;
    unsigned long *table = gmap->table;

    if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
        return NULL;
    if (gmap_is_shadow(gmap) && gmap->removed)
        return NULL;

    if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
        return NULL;

    if (asce_type != _ASCE_TYPE_REGION1 &&
        gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
        return NULL;

    table = gmap->table;
    switch (gmap->asce & _ASCE_TYPE_MASK) {
    switch (asce_type) {
    case _ASCE_TYPE_REGION1:
        table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
        if (level == 4)
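
A worked example of the new guard, assuming the usual s390 encoding where _ASCE_TYPE_REGION1 is 0x0c (so asce_type >> 2 == 3):

    /* Illustrative only: for a region-1 ASCE,
     * WARN_ON_ONCE(level > (asce_type >> 2) + 1) permits level <= 4,
     * matching the arithmetic of the old check it replaces:
     * reject when (0x0c + 4) < level * 4, i.e. when level > 4. */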

@@ -1691,14 +1691,6 @@ SYM_CODE_START(general_protection)
    jmp common_exception
SYM_CODE_END(general_protection)

#ifdef CONFIG_KVM_GUEST
SYM_CODE_START(async_page_fault)
    ASM_CLAC
    pushl $do_async_page_fault
    jmp common_exception_read_cr2
SYM_CODE_END(async_page_fault)
#endif

SYM_CODE_START(rewind_stack_do_exit)
    /* Prevent any naive code from trying to unwind to our caller. */
    xorl %ebp, %ebp

@@ -1202,10 +1202,6 @@ idtentry xendebug do_debug has_error_code=0
idtentry general_protection do_general_protection has_error_code=1
idtentry page_fault do_page_fault has_error_code=1 read_cr2=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault do_async_page_fault has_error_code=1 read_cr2=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check do_mce has_error_code=0 paranoid=1
#endif

@@ -83,6 +83,8 @@
#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
/* Crash MSR available */
#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
/* Support for debug MSRs available */
#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
/* stimer Direct Mode is available */
#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)

@@ -83,6 +83,10 @@
#define KVM_REQ_GET_VMCS12_PAGES KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
    KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
#define KVM_REQ_HV_TLB_FLUSH \
    KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)

#define CR0_RESERVED_BITS \
    (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \

@@ -107,15 +111,8 @@
#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
    PT_PAGE_TABLE_LEVEL = 1,
    PT_DIRECTORY_LEVEL = 2,
    PT_PDPE_LEVEL = 3,
    /* set max level to the biggest one */
    PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES (PT_MAX_HUGEPAGE_LEVEL - \
                           PT_PAGE_TABLE_LEVEL + 1)
#define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
#define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))

@@ -124,7 +121,7 @@ enum {
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
    /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
    /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
    return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
        (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
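
The level arithmetic is easy to sanity-check in isolation. A standalone sketch (userspace C; the kernel's PG_LEVEL_4K = 1, PG_LEVEL_2M = 2 numbering is assumed):

    #include <stdio.h>

    #define HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)  /* mirrors KVM_HPAGE_GFN_SHIFT */

    static unsigned long gfn_to_index(unsigned long gfn, unsigned long base_gfn,
                                      int level)
    {
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    int main(void)
    {
        /* At the 4K level the shift is 0, so the index is a plain page offset. */
        printf("%lu\n", gfn_to_index(0x203, 0x200, 1));  /* prints 3 */
        /* At the 2M level, gfns 0x200-0x3ff collapse into one large-page entry. */
        printf("%lu\n", gfn_to_index(0x3ff, 0x200, 2));  /* prints 0 */
        return 0;
    }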

@@ -164,9 +161,13 @@ enum kvm_reg {
    NR_VCPU_REGS,

    VCPU_EXREG_PDPTR = NR_VCPU_REGS,
    VCPU_EXREG_CR0,
    VCPU_EXREG_CR3,
    VCPU_EXREG_CR4,
    VCPU_EXREG_RFLAGS,
    VCPU_EXREG_SEGMENTS,
    VCPU_EXREG_EXIT_INFO_1,
    VCPU_EXREG_EXIT_INFO_2,
};

enum {

@@ -182,8 +183,10 @@ enum {

enum exit_fastpath_completion {
    EXIT_FASTPATH_NONE,
    EXIT_FASTPATH_SKIP_EMUL_INS,
    EXIT_FASTPATH_REENTER_GUEST,
    EXIT_FASTPATH_EXIT_HANDLED,
};
typedef enum exit_fastpath_completion fastpath_t;
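
A sketch of how the widened enum can be consumed (an assumed caller, not shown in this diff):

    /* Illustrative only: a run loop can spin on REENTER_GUEST (e.g. after a
     * fast TSC-deadline MSR write) and treat EXIT_HANDLED as "no further
     * exit processing needed". */
    fastpath_t exit_fastpath;

    do {
        exit_fastpath = kvm_x86_ops.run(vcpu);
    } while (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST);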

struct x86_emulate_ctxt;
struct x86_exception;

@@ -372,12 +375,12 @@ struct rsvd_bits_validate {
};

struct kvm_mmu_root_info {
    gpa_t cr3;
    gpa_t pgd;
    hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
    ((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })
    ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

@@ -403,7 +406,7 @@ struct kvm_mmu {
    void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                       u64 *spte, const void *pte);
    hpa_t root_hpa;
    gpa_t root_cr3;
    gpa_t root_pgd;
    union kvm_mmu_role mmu_role;
    u8 root_level;
    u8 shadow_root_level;

@@ -598,6 +601,7 @@ struct kvm_vcpu_arch {
    u64 ia32_xss;
    u64 microcode_version;
    u64 arch_capabilities;
    u64 perf_capabilities;

    /*
     * Paging state of the vcpu

@@ -650,7 +654,6 @@ struct kvm_vcpu_arch {

    u64 xcr0;
    u64 guest_supported_xcr0;
    u32 guest_xstate_size;

    struct kvm_pio_request pio;
    void *pio_data;

@@ -680,6 +683,7 @@ struct kvm_vcpu_arch {
    struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

    int maxphyaddr;
    int tdp_level;

    /* emulate context */

@@ -703,6 +707,7 @@ struct kvm_vcpu_arch {
        struct gfn_to_pfn_cache cache;
    } st;

    u64 l1_tsc_offset;
    u64 tsc_offset;
    u64 last_guest_tsc;
    u64 last_host_tsc;

@@ -762,14 +767,17 @@ struct kvm_vcpu_arch {

    struct {
        bool halted;
        gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
        gfn_t gfns[ASYNC_PF_PER_VCPU];
        struct gfn_to_hva_cache data;
        u64 msr_val;
        u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
        u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
        u16 vec;
        u32 id;
        bool send_user_only;
        u32 host_apf_reason;
        u32 host_apf_flags;
        unsigned long nested_apf_token;
        bool delivery_as_pf_vmexit;
        bool pageready_pending;
    } apf;

    /* OSVW MSRs (AMD only) */

@@ -855,6 +863,18 @@ struct kvm_apic_map {
    struct kvm_lapic *phys_map[];
};

/* Hyper-V synthetic debugger (SynDbg) */
struct kvm_hv_syndbg {
    struct {
        u64 control;
        u64 status;
        u64 send_page;
        u64 recv_page;
        u64 pending_page;
    } control;
    u64 options;
};

/* Hyper-V emulation context */
struct kvm_hv {
    struct mutex hv_lock;

@@ -878,6 +898,7 @@ struct kvm_hv {
    atomic_t num_mismatched_vp_indexes;

    struct hv_partition_assist_pg *hv_pa_pg;
    struct kvm_hv_syndbg hv_syndbg;
};

enum kvm_irqchip_mode {

@@ -1028,6 +1049,8 @@ struct kvm_vcpu_stat {
    u64 irq_injections;
    u64 nmi_injections;
    u64 req_event;
    u64 halt_poll_success_ns;
    u64 halt_poll_fail_ns;
};

struct x86_instruction_info;

@@ -1059,7 +1082,7 @@ struct kvm_x86_ops {
    void (*hardware_disable)(void);
    void (*hardware_unsetup)(void);
    bool (*cpu_has_accelerated_tpr)(void);
    bool (*has_emulated_msr)(int index);
    bool (*has_emulated_msr)(u32 index);
    void (*cpuid_update)(struct kvm_vcpu *vcpu);

    unsigned int vm_size;

@@ -1085,8 +1108,6 @@ struct kvm_x86_ops {
    void (*set_segment)(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg);
    void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
    void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
    void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
    void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
    int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
    void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);

@@ -1100,7 +1121,8 @@ struct kvm_x86_ops {
    unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
    void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

    void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
    void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
    void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
    int (*tlb_remote_flush)(struct kvm *kvm);
    int (*tlb_remote_flush_with_range)(struct kvm *kvm,
                                       struct kvm_tlb_range *range);

@@ -1113,7 +1135,13 @@ struct kvm_x86_ops {
     */
    void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

    void (*run)(struct kvm_vcpu *vcpu);
    /*
     * Flush any TLB entries created by the guest. Like tlb_flush_gva(),
     * does not need to flush GPA->HPA mappings.
     */
    void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);

    enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
    int (*handle_exit)(struct kvm_vcpu *vcpu,
                       enum exit_fastpath_completion exit_fastpath);
    int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);

@@ -1126,8 +1154,8 @@ struct kvm_x86_ops {
    void (*set_nmi)(struct kvm_vcpu *vcpu);
    void (*queue_exception)(struct kvm_vcpu *vcpu);
    void (*cancel_injection)(struct kvm_vcpu *vcpu);
    int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
    int (*nmi_allowed)(struct kvm_vcpu *vcpu);
    int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
    int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
    bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
    void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
    void (*enable_nmi_window)(struct kvm_vcpu *vcpu);

@@ -1141,7 +1169,7 @@ struct kvm_x86_ops {
    bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
    void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
    void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
    void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
    void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
    int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
    int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
    int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);

@@ -1153,7 +1181,6 @@ struct kvm_x86_ops {

    bool (*has_wbinvd_exit)(void);

    u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
    /* Returns actual tsc_offset set in active VMCS */
    u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

@@ -1163,10 +1190,8 @@ struct kvm_x86_ops {
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage,
                               struct x86_exception *exception);
    void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
                               enum exit_fastpath_completion *exit_fastpath);
    void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);

    int (*check_nested_events)(struct kvm_vcpu *vcpu);
    void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

    void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

@@ -1199,6 +1224,7 @@ struct kvm_x86_ops {

    /* pmu operations of sub-arch */
    const struct kvm_pmu_ops *pmu_ops;
    const struct kvm_x86_nested_ops *nested_ops;

    /*
     * Architecture specific hooks for vCPU blocking due to

@@ -1226,18 +1252,10 @@ struct kvm_x86_ops {

    void (*setup_mce)(struct kvm_vcpu *vcpu);

    int (*get_nested_state)(struct kvm_vcpu *vcpu,
                            struct kvm_nested_state __user *user_kvm_nested_state,
                            unsigned user_data_size);
    int (*set_nested_state)(struct kvm_vcpu *vcpu,
                            struct kvm_nested_state __user *user_kvm_nested_state,
                            struct kvm_nested_state *kvm_state);
    bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

    int (*smi_allowed)(struct kvm_vcpu *vcpu);
    int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
    int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
    int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
    int (*enable_smi_window)(struct kvm_vcpu *vcpu);
    void (*enable_smi_window)(struct kvm_vcpu *vcpu);

    int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
    int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

@@ -1245,14 +1263,28 @@ struct kvm_x86_ops {

    int (*get_msr_feature)(struct kvm_msr_entry *entry);

    int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
                               uint16_t *vmcs_version);
    uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

    bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);

    bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
    int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);

    void (*migrate_timers)(struct kvm_vcpu *vcpu);
};

struct kvm_x86_nested_ops {
    int (*check_events)(struct kvm_vcpu *vcpu);
    bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
    int (*get_state)(struct kvm_vcpu *vcpu,
                     struct kvm_nested_state __user *user_kvm_nested_state,
                     unsigned user_data_size);
    int (*set_state)(struct kvm_vcpu *vcpu,
                     struct kvm_nested_state __user *user_kvm_nested_state,
                     struct kvm_nested_state *kvm_state);
    bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

    int (*enable_evmcs)(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version);
    uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
};
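
Usage sketch for the new indirection (the call site is an assumption; only the ops table itself is part of this diff):

    /* Generic x86 code reaches vendor nested-virt callbacks through the
     * kvm_x86_ops.nested_ops pointer instead of flat kvm_x86_ops members: */
    if (is_guest_mode(vcpu))
        r = kvm_x86_ops.nested_ops->check_events(vcpu);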

struct kvm_x86_init_ops {

@@ -1451,6 +1483,8 @@ void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long pay
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
                                    struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);

@@ -1478,6 +1512,8 @@ void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void kvm_update_dr7(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);

@@ -1508,8 +1544,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gva_t gva, hpa_t root_hpa);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
                     bool skip_mmu_sync);

void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);

@@ -1573,8 +1612,6 @@ enum {
};

#define HF_GIF_MASK (1 << 0)
#define HF_HIF_MASK (1 << 1)
#define HF_VINTR_MASK (1 << 2)
#define HF_NMI_MASK (1 << 3)
#define HF_IRET_MASK (1 << 4)
#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */

@@ -1640,7 +1677,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);

@@ -88,11 +88,21 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
void do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
u32 kvm_read_and_reset_apf_flags(void);
void kvm_disable_steal_time(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);

DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
    if (static_branch_unlikely(&kvm_async_pf_enabled))
        return __kvm_handle_async_pf(regs, token);
    else
        return false;
}
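
A sketch of the intended caller (an assumption; the #PF-side hookup lives in a different part of the series):

    /* Illustrative only: the guest #PF handler can test the static key via
     * the inline above and bail out early for async page faults. */
    dotraplinkage void do_page_fault(struct pt_regs *regs,
                                     unsigned long hw_error_code,
                                     unsigned long address)
    {
        if (kvm_handle_async_pf(regs, (u32)address))
            return;
        /* ... normal page-fault handling ... */
    }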

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);

@@ -103,7 +113,7 @@ static inline void kvm_spinlock_init(void)
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait(T, I) do {} while(0)
#define kvm_async_pf_task_wait_schedule(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline bool kvm_para_available(void)

@@ -121,7 +131,7 @@ static inline unsigned int kvm_arch_para_hints(void)
    return 0;
}

static inline u32 kvm_read_and_reset_pf_reason(void)
static inline u32 kvm_read_and_reset_apf_flags(void)
{
    return 0;
}

@@ -130,6 +140,11 @@ static inline void kvm_disable_steal_time(void)
{
    return;
}

static inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
    return false;
}
#endif

#endif /* _ASM_X86_KVM_PARA_H */

@@ -96,7 +96,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
    u8 reserved_6[8];      /* Offset 0xe8 */
    u64 avic_logical_id;   /* Offset 0xf0 */
    u64 avic_physical_id;  /* Offset 0xf8 */
    u8 reserved_7[768];
};

@@ -203,8 +202,16 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
    u64 last_excp_to;
};

static inline void __unused_size_checks(void)
{
    BUILD_BUG_ON(sizeof(struct vmcb_save_area) != 0x298);
    BUILD_BUG_ON(sizeof(struct vmcb_control_area) != 256);
}

struct __attribute__ ((__packed__)) vmcb {
    struct vmcb_control_area control;
    u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
    struct vmcb_save_area save;
};
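
One consequence worth making explicit: with the control area pinned at 256 bytes and the new pad, the save area stays at its architectural offset. A minimal compile-time check in the same style (illustrative, not part of the diff):

    BUILD_BUG_ON(offsetof(struct vmcb, save) != 1024);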

@@ -527,10 +527,12 @@ struct vmx_msr_entry {
/*
 * Exit Qualifications for entry failure during or after loading guest state
 */
#define ENTRY_FAIL_DEFAULT 0
#define ENTRY_FAIL_PDPTE 2
#define ENTRY_FAIL_NMI 3
#define ENTRY_FAIL_VMCS_LINK_PTR 4
enum vm_entry_failure_code {
    ENTRY_FAIL_DEFAULT = 0,
    ENTRY_FAIL_PDPTE = 2,
    ENTRY_FAIL_NMI = 3,
    ENTRY_FAIL_VMCS_LINK_PTR = 4,
};

/*
 * Exit Qualifications for EPT Violations

@@ -50,14 +50,12 @@ struct x86_init_resources {
 * @pre_vector_init:  init code to run before interrupt vectors
 *                    are set up.
 * @intr_init:        interrupt init code
 * @trap_init:        platform specific trap setup
 * @intr_mode_select: interrupt delivery mode selection
 * @intr_mode_init:   interrupt delivery mode setup
 */
struct x86_init_irqs {
    void (*pre_vector_init)(void);
    void (*intr_init)(void);
    void (*trap_init)(void);
    void (*intr_mode_select)(void);
    void (*intr_mode_init)(void);
};

@@ -385,32 +385,48 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)

#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */
#define KVM_STATE_NESTED_FORMAT_SVM 1

#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_GIF_SET 0x00000100

#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002

#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000

#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000

#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001

struct kvm_vmx_nested_state_data {
    __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
    __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
};

struct kvm_vmx_nested_state_hdr {
    __u32 flags;
    __u64 vmxon_pa;
    __u64 vmcs12_pa;
    __u64 preemption_timer_deadline;

    struct {
        __u16 flags;
    } smm;
};

struct kvm_svm_nested_state_data {
    /* Save area only used if KVM_STATE_NESTED_RUN_PENDING. */
    __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
};

struct kvm_svm_nested_state_hdr {
    __u64 vmcb_pa;
};

/* for KVM_CAP_NESTED_STATE */
struct kvm_nested_state {
    __u16 flags;

@@ -419,6 +435,7 @@ struct kvm_nested_state {

    union {
        struct kvm_vmx_nested_state_hdr vmx;
        struct kvm_svm_nested_state_hdr svm;

        /* Pad the header to 128 bytes. */
        __u8 pad[120];

@@ -431,6 +448,7 @@ struct kvm_nested_state {
     */
    union {
        struct kvm_vmx_nested_state_data vmx[0];
        struct kvm_svm_nested_state_data svm[0];
    } data;
};

@@ -31,6 +31,7 @@
#define KVM_FEATURE_PV_SEND_IPI 11
#define KVM_FEATURE_POLL_CONTROL 12
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14

#define KVM_HINTS_REALTIME 0

@@ -50,6 +51,8 @@
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07

struct kvm_steal_time {
    __u64 steal;

@@ -81,6 +84,11 @@ struct kvm_clock_pairing {
#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
#define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1 << 2)
#define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3)

/* MSR_KVM_ASYNC_PF_INT */
#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)

/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1

@@ -112,8 +120,13 @@ struct kvm_mmu_op_release_pt {
#define KVM_PV_REASON_PAGE_READY 2

struct kvm_vcpu_pv_apf_data {
    __u32 reason;
    __u8 pad[60];
    /* Used for 'page not present' events delivered via #PF */
    __u32 flags;

    /* Used for 'page ready' events delivered via interrupt notification */
    __u32 token;

    __u8 pad[56];
    __u32 enabled;
};
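
A hedged sketch of the guest-side enablement implied by the new layout and MSRs (the vector choice and per-cpu symbol are illustrative, not from this diff):

    /* Opt in to interrupt-based 'page ready' delivery (sketch only): */
    wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR); /* vec in bits 7:0 */
    pa  = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
    pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
    wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);

    /* ...and after consuming a token from apf_reason.token, acknowledge it
     * so the host can deliver the next 'page ready' event: */
    wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);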

@@ -150,6 +150,9 @@
    { EXIT_REASON_UMWAIT, "UMWAIT" }, \
    { EXIT_REASON_TPAUSE, "TPAUSE" }

#define VMX_EXIT_REASON_FLAGS \
    { VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }

#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4

@@ -35,6 +35,8 @@
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)

@@ -73,7 +75,6 @@ struct kvm_task_sleep_node {
    struct swait_queue_head wq;
    u32 token;
    int cpu;
    bool halted;
};

static struct kvm_task_sleep_head {

@@ -96,77 +97,64 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
    return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *                    (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
    u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
    struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
    struct kvm_task_sleep_node n, *e;
    DECLARE_SWAITQUEUE(wait);

    rcu_irq_enter();
    struct kvm_task_sleep_node *e;

    raw_spin_lock(&b->lock);
    e = _find_apf_task(b, token);
    if (e) {
        /* dummy entry exist -> wake up was delivered ahead of PF */
        hlist_del(&e->link);
        kfree(e);
        raw_spin_unlock(&b->lock);

        rcu_irq_exit();
        return;
        kfree(e);
        return false;
    }

    n.token = token;
    n.cpu = smp_processor_id();
    n.halted = is_idle_task(current) ||
               (IS_ENABLED(CONFIG_PREEMPT_COUNT)
                ? preempt_count() > 1 || rcu_preempt_depth()
                : interrupt_kernel);
    init_swait_queue_head(&n.wq);
    hlist_add_head(&n.link, &b->list);
    n->token = token;
    n->cpu = smp_processor_id();
    init_swait_queue_head(&n->wq);
    hlist_add_head(&n->link, &b->list);
    raw_spin_unlock(&b->lock);
    return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token: Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
    struct kvm_task_sleep_node n;
    DECLARE_SWAITQUEUE(wait);

    lockdep_assert_irqs_disabled();

    if (!kvm_async_pf_queue_task(token, &n))
        return;

    for (;;) {
        if (!n.halted)
            prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
        prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
        if (hlist_unhashed(&n.link))
            break;

        rcu_irq_exit();

        if (!n.halted) {
            local_irq_enable();
            schedule();
            local_irq_disable();
        } else {
            /*
             * We cannot reschedule. So halt.
             */
            native_safe_halt();
            local_irq_disable();
        }

        rcu_irq_enter();
        local_irq_enable();
        schedule();
        local_irq_disable();
    }
    if (!n.halted)
        finish_swait(&n.wq, &wait);

    rcu_irq_exit();
    return;
    finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
    hlist_del_init(&n->link);
    if (n->halted)
        smp_send_reschedule(n->cpu);
    else if (swq_has_sleeper(&n->wq))
    if (swq_has_sleeper(&n->wq))
        swake_up_one(&n->wq);
}

@@ -175,12 +163,13 @@ static void apf_task_wake_all(void)
    int i;

    for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
        struct hlist_node *p, *next;
        struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
        struct kvm_task_sleep_node *n;
        struct hlist_node *p, *next;

        raw_spin_lock(&b->lock);
        hlist_for_each_safe(p, next, &b->list) {
            struct kvm_task_sleep_node *n =
                hlist_entry(p, typeof(*n), link);
            n = hlist_entry(p, typeof(*n), link);
            if (n->cpu == smp_processor_id())
                apf_task_wake_one(n);
        }

@@ -221,46 +210,61 @@ again:
        n->cpu = smp_processor_id();
        init_swait_queue_head(&n->wq);
        hlist_add_head(&n->link, &b->list);
    } else
    } else {
        apf_task_wake_one(n);
    }
    raw_spin_unlock(&b->lock);
    return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
u32 kvm_read_and_reset_apf_flags(void)
{
    u32 reason = 0;
    u32 flags = 0;

    if (__this_cpu_read(apf_reason.enabled)) {
        reason = __this_cpu_read(apf_reason.reason);
        __this_cpu_write(apf_reason.reason, 0);
        flags = __this_cpu_read(apf_reason.flags);
        __this_cpu_write(apf_reason.flags, 0);
    }

    return reason;
    return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
    switch (kvm_read_and_reset_pf_reason()) {
    default:
        do_page_fault(regs, error_code, address);
        break;
    u32 reason = kvm_read_and_reset_apf_flags();

    switch (reason) {
    case KVM_PV_REASON_PAGE_NOT_PRESENT:
        /* page is swapped out by the host. */
        kvm_async_pf_task_wait((u32)address, !user_mode(regs));
        break;
    case KVM_PV_REASON_PAGE_READY:
        rcu_irq_enter();
        kvm_async_pf_task_wake((u32)address);
        rcu_irq_exit();
        break;
    default:
        return false;
    }

    /*
     * If the host managed to inject an async #PF into an interrupt
     * disabled region, then die hard as this is not going to end well
     * and the host side is seriously broken.
     */
    if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
        panic("Host injected async #PF in interrupt disabled region\n");

    if (reason == KVM_PV_REASON_PAGE_NOT_PRESENT) {
        if (unlikely(!(user_mode(regs))))
            panic("Host injected async #PF in kernel mode\n");
        /* Page is swapped out by the host. */
        kvm_async_pf_task_wait_schedule(token);
    } else {
        rcu_irq_enter();
        kvm_async_pf_task_wake(token);
        rcu_irq_exit();
    }
    return true;
}
NOKPROBE_SYMBOL(do_async_page_fault);
NOKPROBE_SYMBOL(__kvm_handle_async_pf);

static void __init paravirt_ops_setup(void)
{

@@ -306,11 +310,11 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
static void kvm_guest_cpu_init(void)
{
    if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
        u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
        u64 pa;

#ifdef CONFIG_PREEMPTION
        pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
        WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

        pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
        pa |= KVM_ASYNC_PF_ENABLED;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))

@@ -318,12 +322,12 @@ static void kvm_guest_cpu_init(void)

        wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
        __this_cpu_write(apf_reason.enabled, 1);
        printk(KERN_INFO"KVM setup async PF for cpu %d\n",
               smp_processor_id());
        pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
    }

    if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
        unsigned long pa;

        /* Size alignment is implied but just to make it explicit. */
        BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
        __this_cpu_write(kvm_apic_eoi, 0);

@@ -344,8 +348,7 @@ static void kvm_pv_disable_apf(void)
    wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
    __this_cpu_write(apf_reason.enabled, 0);

    printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
           smp_processor_id());
    pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)

@@ -592,12 +595,6 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
}
#endif

static void __init kvm_apf_trap_init(void)
{
    update_intr_gate(X86_TRAP_PF, async_page_fault);
}


static void kvm_flush_tlb_others(const struct cpumask *cpumask,
                                 const struct flush_tlb_info *info)
{

@@ -632,8 +629,6 @@ static void __init kvm_guest_init(void)
    register_reboot_notifier(&kvm_pv_reboot_nb);
    for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
        raw_spin_lock_init(&async_pf_sleepers[i].lock);
    if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
        x86_init.irqs.trap_init = kvm_apf_trap_init;

    if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
        has_steal_clock = 1;

@@ -649,6 +644,9 @@ static void __init kvm_guest_init(void)
    if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
        apic_set_eoi_write(kvm_guest_apic_eoi_write);

    if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf)
        static_branch_enable(&kvm_async_pf_enabled);

#ifdef CONFIG_SMP
    smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
    smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
@@ -903,7 +903,5 @@ void __init trap_init(void)

    idt_setup_ist_traps();

    x86_init.irqs.trap_init();

    idt_setup_debugidt_traps();
}

@@ -79,7 +79,6 @@ struct x86_init_ops x86_init __initdata = {
    .irqs = {
        .pre_vector_init = init_ISA_irqs,
        .intr_init = native_init_IRQ,
        .trap_init = x86_init_noop,
        .intr_mode_select = apic_intr_mode_select,
        .intr_mode_init = apic_intr_mode_init
    },

@@ -86,12 +86,10 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
    best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
    if (!best) {
        vcpu->arch.guest_supported_xcr0 = 0;
        vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
    } else {
        vcpu->arch.guest_supported_xcr0 =
            (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
        vcpu->arch.guest_xstate_size = best->ebx =
            xstate_required_size(vcpu->arch.xcr0, false);
        best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
    }

    best = kvm_find_cpuid_entry(vcpu, 0xD, 1);

@@ -124,8 +122,9 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
                                   MSR_IA32_MISC_ENABLE_MWAIT);
    }

    /* Update physical-address width */
    /* Note, maxphyaddr must be updated before tdp_level. */
    vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
    vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
    kvm_mmu_reset_context(vcpu);

    kvm_pmu_refresh(vcpu);

@@ -297,7 +296,7 @@ void kvm_set_cpu_caps(void)
        F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
        0 /* DS-CPL, VMX, SMX, EST */ |
        0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
        F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
        F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
        F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
        F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
        0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |

@@ -712,7 +711,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
                     (1 << KVM_FEATURE_PV_SEND_IPI) |
                     (1 << KVM_FEATURE_POLL_CONTROL) |
                     (1 << KVM_FEATURE_PV_SCHED_YIELD);
                     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
                     (1 << KVM_FEATURE_ASYNC_PF_INT);

        if (sched_info_on())
            entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

@@ -728,6 +728,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
        cpuid_entry_override(entry, CPUID_8000_0001_EDX);
        cpuid_entry_override(entry, CPUID_8000_0001_ECX);
        break;
    case 0x80000006:
        /* L2 cache and TLB: pass through host info. */
        break;
    case 0x80000007: /* Advanced power management */
        /* invariant TSC is CPUID.80000007H:EDX[8] */
        entry->edx &= (1 << 8);

@@ -303,4 +303,9 @@ static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
        kvm_cpu_cap_set(x86_feature);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
    return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}
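
A quick worked example of the helper (illustrative values):

    /* Assuming cpuid_maxphyaddr(vcpu) == 48:
     *   page_address_valid(vcpu, 0x1000)      -> true
     *   page_address_valid(vcpu, 0x1008)      -> false (not page aligned)
     *   page_address_valid(vcpu, 1ULL << 60)  -> false (beyond MAXPHYADDR)
     */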

#endif

@ -5798,6 +5798,8 @@ writeback:
|
||||
}
|
||||
|
||||
ctxt->eip = ctxt->_eip;
|
||||
if (ctxt->mode != X86EMUL_MODE_PROT64)
|
||||
ctxt->eip = (u32)ctxt->_eip;
|
||||
|
||||
done:
|
||||
if (rc == X86EMUL_PROPAGATE_FAULT) {
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "x86.h"
|
||||
#include "lapic.h"
|
||||
#include "ioapic.h"
|
||||
#include "cpuid.h"
|
||||
#include "hyperv.h"
|
||||
|
||||
#include <linux/cpu.h>
|
||||
@ -266,6 +267,123 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpuid_entry2 *entry;
|
||||
|
||||
entry = kvm_find_cpuid_entry(vcpu,
|
||||
HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
|
||||
0);
|
||||
if (!entry)
|
||||
return false;
|
||||
|
||||
return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
|
||||
}
|
||||
|
||||
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
struct kvm_hv *hv = &kvm->arch.hyperv;
|
||||
|
||||
if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
|
||||
hv->hv_syndbg.control.status =
|
||||
vcpu->run->hyperv.u.syndbg.status;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
|
||||
{
|
||||
struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
|
||||
struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
|
||||
|
||||
hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
|
||||
hv_vcpu->exit.u.syndbg.msr = msr;
|
||||
hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
|
||||
hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
|
||||
hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
|
||||
hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
|
||||
vcpu->arch.complete_userspace_io =
|
||||
kvm_hv_syndbg_complete_userspace;
|
||||
|
||||
kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
|
||||
}
|
||||
|
||||
static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
|
||||
{
|
||||
struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
|
||||
|
||||
if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
|
||||
return 1;
|
||||
|
||||
trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
|
||||
vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data);
|
||||
switch (msr) {
|
||||
case HV_X64_MSR_SYNDBG_CONTROL:
|
||||
syndbg->control.control = data;
|
||||
if (!host)
|
||||
syndbg_exit(vcpu, msr);
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_STATUS:
|
||||
syndbg->control.status = data;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_SEND_BUFFER:
|
||||
syndbg->control.send_page = data;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_RECV_BUFFER:
|
||||
syndbg->control.recv_page = data;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
|
||||
syndbg->control.pending_page = data;
|
||||
if (!host)
|
||||
syndbg_exit(vcpu, msr);
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_OPTIONS:
|
||||
syndbg->options = data;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
|
||||
{
|
||||
struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
|
||||
|
||||
if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
|
||||
return 1;
|
||||
|
||||
switch (msr) {
|
||||
case HV_X64_MSR_SYNDBG_CONTROL:
|
||||
*pdata = syndbg->control.control;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_STATUS:
|
||||
*pdata = syndbg->control.status;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_SEND_BUFFER:
|
||||
*pdata = syndbg->control.send_page;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_RECV_BUFFER:
|
||||
*pdata = syndbg->control.recv_page;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
|
||||
*pdata = syndbg->control.pending_page;
|
||||
break;
|
||||
case HV_X64_MSR_SYNDBG_OPTIONS:
|
||||
*pdata = syndbg->options;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
|
||||
vcpu_to_hv_vcpu(vcpu)->vp_index, msr,
|
||||
*pdata);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
|
||||
bool host)
|
||||
{
|
||||
@@ -800,6 +918,8 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
 	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
 	case HV_X64_MSR_TSC_EMULATION_CONTROL:
 	case HV_X64_MSR_TSC_EMULATION_STATUS:
+	case HV_X64_MSR_SYNDBG_OPTIONS:
+	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
 		r = true;
 		break;
 	}

@@ -1061,6 +1181,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		if (!host)
 			return 1;
 		break;
+	case HV_X64_MSR_SYNDBG_OPTIONS:
+	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+		return syndbg_set_msr(vcpu, msr, data, host);
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
@@ -1190,7 +1313,8 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	return 0;
 }

-static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
+			     bool host)
 {
 	u64 data = 0;
 	struct kvm *kvm = vcpu->kvm;

@@ -1227,6 +1351,9 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 	case HV_X64_MSR_TSC_EMULATION_STATUS:
 		data = hv->hv_tsc_emulation_status;
 		break;
+	case HV_X64_MSR_SYNDBG_OPTIONS:
+	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+		return syndbg_get_msr(vcpu, msr, pdata, host);
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
 		return 1;
@@ -1316,7 +1443,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 		int r;

 		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
-		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
+		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
 		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
 		return r;
 	} else

@@ -1425,8 +1552,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
 	 * analyze it here, flush TLB regardless of the specified address space.
 	 */
-	kvm_make_vcpus_request_mask(kvm,
-				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
+	kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
 				    NULL, vcpu_mask, &hv_vcpu->tlb_flush);

 ret_success:

@@ -1530,7 +1656,7 @@ ret_success:

 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
+	return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0;
 }

 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
@@ -1709,6 +1835,34 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 		}
 		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
 		break;
+	case HVCALL_POST_DEBUG_DATA:
+	case HVCALL_RETRIEVE_DEBUG_DATA:
+		if (unlikely(fast)) {
+			ret = HV_STATUS_INVALID_PARAMETER;
+			break;
+		}
+		fallthrough;
+	case HVCALL_RESET_DEBUG_SESSION: {
+		struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+
+		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
+			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
+			break;
+		}
+
+		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
+			ret = HV_STATUS_OPERATION_DENIED;
+			break;
+		}
+		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
+		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
+		vcpu->run->hyperv.u.hcall.input = param;
+		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
+		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+		vcpu->arch.complete_userspace_io =
+				kvm_hv_hypercall_complete_userspace;
+		return 0;
+	}
 	default:
 		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
 		break;
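HVCALL_POST_DEBUG_DATA, HVCALL_RETRIEVE_DEBUG_DATA and HVCALL_RESET_DEBUG_SESSION are forwarded to userspace as KVM_EXIT_HYPERV_HCALL exits rather than handled in the kernel. A hedged sketch of the userspace side, assuming the VMM fills in the result before the next KVM_RUN; the matching in-kernel completion is kvm_hv_hypercall_complete_userspace:

/* Sketch only: the debug-data processing itself is a placeholder. */
#include <linux/kvm.h>

static void handle_hv_hcall(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_HYPERV ||
	    run->hyperv.type != KVM_EXIT_HYPERV_HCALL)
		return;

	__u64 code   = run->hyperv.u.hcall.input & 0xffff; /* low 16 bits: call code */
	__u64 ingpa  = run->hyperv.u.hcall.params[0];
	__u64 outgpa = run->hyperv.u.hcall.params[1];
	(void)code; (void)ingpa; (void)outgpa;

	/* ... exchange debug data with the remote debugger here ... */

	/* Delivered to the guest via kvm_hv_hypercall_complete_userspace(). */
	run->hyperv.u.hcall.result = 0; /* HV_STATUS_SUCCESS */
}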
@@ -1796,12 +1950,15 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 		{ .function = HYPERV_CPUID_FEATURES },
 		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
 		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
+		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
+		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
+		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
 		{ .function = HYPERV_CPUID_NESTED_FEATURES },
 	};
 	int i, nent = ARRAY_SIZE(cpuid_entries);

-	if (kvm_x86_ops.nested_get_evmcs_version)
-		evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);
+	if (kvm_x86_ops.nested_ops->get_evmcs_version)
+		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

 	/* Skip NESTED_FEATURES if eVMCS is not supported */
 	if (!evmcs_ver)

@@ -1821,7 +1978,7 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
 			memcpy(signature, "Linux KVM Hv", 12);

-			ent->eax = HYPERV_CPUID_NESTED_FEATURES;
+			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
 			ent->ebx = signature[0];
 			ent->ecx = signature[1];
 			ent->edx = signature[2];

@@ -1860,6 +2017,10 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
 			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

+			ent->ebx |= HV_DEBUGGING;
+			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
+			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
+
 			/*
 			 * Direct Synthetic timers only make sense with in-kernel
 			 * LAPIC

@@ -1903,6 +2064,24 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,

 			break;

+		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
+			memcpy(signature, "Linux KVM Hv", 12);
+
+			ent->eax = 0;
+			ent->ebx = signature[0];
+			ent->ecx = signature[1];
+			ent->edx = signature[2];
+			break;
+
+		case HYPERV_CPUID_SYNDBG_INTERFACE:
+			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
+			ent->eax = signature[0];
+			break;
+
+		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
+			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+			break;
+
 		default:
 			break;
 		}
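The three new leaves are reported through KVM_GET_SUPPORTED_HV_CPUID, so a VMM can probe for synthetic-debugger support before exposing it to a guest. A rough sketch of that query; error handling is omitted, `vcpu_fd` is assumed to be an open vCPU descriptor, and the leaf number is written out literally since the HYPERV_CPUID_SYNDBG_* names (see the header hunk below) are not in the uapi headers:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

#define MAX_HV_CPUID_ENTRIES 64

static int has_syndbg_leaf(int vcpu_fd)
{
	struct {
		struct kvm_cpuid2 hdr;
		struct kvm_cpuid_entry2 entries[MAX_HV_CPUID_ENTRIES];
	} cpuid;

	memset(&cpuid, 0, sizeof(cpuid));
	cpuid.hdr.nent = MAX_HV_CPUID_ENTRIES;

	if (ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid) < 0)
		return 0;

	for (unsigned int i = 0; i < cpuid.hdr.nent; i++)
		if (cpuid.hdr.entries[i].function == 0x40000082)
			return 1; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES */
	return 0;
}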
@@ -23,6 +23,33 @@

 #include <linux/kvm_host.h>

+/*
+ * The #defines related to the synthetic debugger are required by KDNet, but
+ * they are not documented in the Hyper-V TLFS because the synthetic debugger
+ * functionality has been deprecated and is subject to removal in future
+ * versions of Windows.
+ */
+#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
+#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
+#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082
+
+/*
+ * Hyper-V synthetic debugger platform capabilities
+ * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
+ */
+#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)
+
+/* Hyper-V Synthetic debug options MSR */
+#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
+#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
+#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
+#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
+#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
+#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF
+
+/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
+#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)
+
 static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
 {
 	return &vcpu->arch.hyperv;

@@ -46,6 +73,11 @@ static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
 	return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic));
 }

+static inline struct kvm_hv_syndbg *vcpu_to_hv_syndbg(struct kvm_vcpu *vcpu)
+{
+	return &vcpu->kvm->arch.hyperv.hv_syndbg;
+}
+
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
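One detail worth noting in these defines: HV_X64_MSR_SYNDBG_CONTROL through HV_X64_MSR_SYNDBG_PENDING_BUFFER are contiguous (0x400000F1 to 0x400000F5) while HV_X64_MSR_SYNDBG_OPTIONS sits apart at 0x400000FF, which is why the dispatch sites above pair a case range with a separate case label. A tiny illustration of the idiom (the case range is a GNU C extension, used throughout KVM):

/* Illustrative only; mirrors the dispatch pattern in the hunks above. */
static int is_syndbg_msr(unsigned int msr)
{
	switch (msr) {
	case 0x400000F1 ... 0x400000F5:	/* CONTROL ... PENDING_BUFFER */
	case 0x400000FF:		/* OPTIONS */
		return 1;
	default:
		return 0;
	}
}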
@@ -3,8 +3,8 @@
 #define __KVM_IO_APIC_H

 #include <linux/kvm_host.h>
-
 #include <kvm/iodev.h>
+#include "irq.h"

 struct kvm;
 struct kvm_vcpu;

@@ -108,11 +108,7 @@ do {								\

 static inline int ioapic_in_kernel(struct kvm *kvm)
 {
-	int mode = kvm->arch.irqchip_mode;
-
-	/* Matches smp_wmb() when setting irqchip_mode */
-	smp_rmb();
-	return mode == KVM_IRQCHIP_KERNEL;
+	return irqchip_kernel(kvm);
 }

 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
|
@ -83,6 +83,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
|
||||
|
||||
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
|
||||
|
||||
/*
|
||||
* check if there is pending interrupt without
|
||||
@ -159,6 +160,8 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__kvm_migrate_apic_timer(vcpu);
|
||||
__kvm_migrate_pit_timer(vcpu);
|
||||
if (kvm_x86_ops.migrate_timers)
|
||||
kvm_x86_ops.migrate_timers(vcpu);
|
||||
}
|
||||
|
||||
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include <kvm/iodev.h>
|
||||
#include "ioapic.h"
|
||||
#include "lapic.h"
|
||||
|
||||
#define PIC_NUM_PINS 16
|
||||
@ -66,15 +65,6 @@ void kvm_pic_destroy(struct kvm *kvm);
|
||||
int kvm_pic_read_irq(struct kvm *kvm);
|
||||
void kvm_pic_update_irq(struct kvm_pic *s);
|
||||
|
||||
static inline int pic_in_kernel(struct kvm *kvm)
|
||||
{
|
||||
int mode = kvm->arch.irqchip_mode;
|
||||
|
||||
/* Matches smp_wmb() when setting irqchip_mode */
|
||||
smp_rmb();
|
||||
return mode == KVM_IRQCHIP_KERNEL;
|
||||
}
|
||||
|
||||
static inline int irqchip_split(struct kvm *kvm)
|
||||
{
|
||||
int mode = kvm->arch.irqchip_mode;
|
||||
@ -93,6 +83,11 @@ static inline int irqchip_kernel(struct kvm *kvm)
|
||||
return mode == KVM_IRQCHIP_KERNEL;
|
||||
}
|
||||
|
||||
static inline int pic_in_kernel(struct kvm *kvm)
|
||||
{
|
||||
return irqchip_kernel(kvm);
|
||||
}
|
||||
|
||||
static inline int irqchip_in_kernel(struct kvm *kvm)
|
||||
{
|
||||
int mode = kvm->arch.irqchip_mode;
|
||||
|
@@ -116,8 +116,9 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
-	if (tmask & vcpu->arch.cr0_guest_owned_bits)
-		kvm_x86_ops.decache_cr0_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }

@@ -129,8 +130,9 @@ static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
 static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
-	if (tmask & vcpu->arch.cr4_guest_owned_bits)
-		kvm_x86_ops.decache_cr4_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }
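This change replaces the old unconditional `decache` hooks with the generic register-availability cache: the expensive hardware read happens only when the caller asks for bits the guest can own *and* the cached copy is stale. A toy model of that pattern; the names here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>

struct toy_vcpu {
	uint64_t cr0;               /* cached value */
	uint64_t guest_owned_bits;  /* bits the guest can flip without exiting */
	bool     cr0_available;     /* cache validity, cf. kvm_register_is_available() */
};

/* Stand-in for the expensive VMREAD performed by cache_reg(). */
static uint64_t hw_read_cr0(struct toy_vcpu *v) { return v->cr0; }

static uint64_t toy_read_cr0_bits(struct toy_vcpu *v, uint64_t mask)
{
	/* Refresh only when a guest-owned bit is requested and stale. */
	if ((mask & v->guest_owned_bits) && !v->cr0_available) {
		v->cr0 = hw_read_cr0(v);
		v->cr0_available = true;
	}
	return v->cr0 & mask;
}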
|
@ -36,6 +36,7 @@
|
||||
#include <linux/jump_label.h>
|
||||
#include "kvm_cache_regs.h"
|
||||
#include "irq.h"
|
||||
#include "ioapic.h"
|
||||
#include "trace.h"
|
||||
#include "x86.h"
|
||||
#include "cpuid.h"
|
||||
@ -110,11 +111,18 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
|
||||
return apic->vcpu->vcpu_id;
|
||||
}
|
||||
|
||||
bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
|
||||
static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_can_post_timer_interrupt);
|
||||
|
||||
bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_x86_ops.set_hv_timer
|
||||
&& !(kvm_mwait_in_guest(vcpu->kvm) ||
|
||||
kvm_can_post_timer_interrupt(vcpu));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);
|
||||
|
||||
static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
@ -1593,7 +1601,7 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
|
||||
}
|
||||
}
|
||||
|
||||
static void apic_timer_expired(struct kvm_lapic *apic)
|
||||
static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = apic->vcpu;
|
||||
struct kvm_timer *ktimer = &apic->lapic_timer;
|
||||
@ -1604,6 +1612,12 @@ static void apic_timer_expired(struct kvm_lapic *apic)
|
||||
if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
|
||||
ktimer->expired_tscdeadline = ktimer->tscdeadline;
|
||||
|
||||
if (!from_timer_fn && vcpu->arch.apicv_active) {
|
||||
WARN_ON(kvm_get_running_vcpu() != vcpu);
|
||||
kvm_apic_inject_pending_timer_irqs(apic);
|
||||
return;
|
||||
}
|
||||
|
||||
if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
|
||||
if (apic->lapic_timer.timer_advance_ns)
|
||||
__kvm_wait_lapic_expire(vcpu);
|
||||
@ -1643,18 +1657,23 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
|
||||
expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
|
||||
hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
|
||||
} else
|
||||
apic_timer_expired(apic);
|
||||
apic_timer_expired(apic, false);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
|
||||
{
|
||||
return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
|
||||
}
|
||||
|
||||
static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
|
||||
{
|
||||
ktime_t now, remaining;
|
||||
u64 ns_remaining_old, ns_remaining_new;
|
||||
|
||||
apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
|
||||
* APIC_BUS_CYCLE_NS * apic->divide_count;
|
||||
apic->lapic_timer.period =
|
||||
tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
|
||||
limit_periodic_timer_frequency(apic);
|
||||
|
||||
now = ktime_get();
|
||||
@ -1672,14 +1691,15 @@ static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_diviso
|
||||
apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
|
||||
}
|
||||
|
||||
static bool set_target_expiration(struct kvm_lapic *apic)
|
||||
static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
|
||||
{
|
||||
ktime_t now;
|
||||
u64 tscl = rdtsc();
|
||||
s64 deadline;
|
||||
|
||||
now = ktime_get();
|
||||
apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
|
||||
* APIC_BUS_CYCLE_NS * apic->divide_count;
|
||||
apic->lapic_timer.period =
|
||||
tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
|
||||
|
||||
if (!apic->lapic_timer.period) {
|
||||
apic->lapic_timer.tscdeadline = 0;
|
||||
@ -1687,10 +1707,32 @@ static bool set_target_expiration(struct kvm_lapic *apic)
|
||||
}
|
||||
|
||||
limit_periodic_timer_frequency(apic);
|
||||
deadline = apic->lapic_timer.period;
|
||||
|
||||
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
|
||||
if (unlikely(count_reg != APIC_TMICT)) {
|
||||
deadline = tmict_to_ns(apic,
|
||||
kvm_lapic_get_reg(apic, count_reg));
|
||||
if (unlikely(deadline <= 0))
|
||||
deadline = apic->lapic_timer.period;
|
||||
else if (unlikely(deadline > apic->lapic_timer.period)) {
|
||||
pr_info_ratelimited(
|
||||
"kvm: vcpu %i: requested lapic timer restore with "
|
||||
"starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
|
||||
"Using initial count to start timer.\n",
|
||||
apic->vcpu->vcpu_id,
|
||||
count_reg,
|
||||
kvm_lapic_get_reg(apic, count_reg),
|
||||
deadline, apic->lapic_timer.period);
|
||||
kvm_lapic_set_reg(apic, count_reg, 0);
|
||||
deadline = apic->lapic_timer.period;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
|
||||
nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
|
||||
apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
|
||||
nsec_to_cycles(apic->vcpu, deadline);
|
||||
apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -1723,7 +1765,7 @@ static void start_sw_period(struct kvm_lapic *apic)
|
||||
|
||||
if (ktime_after(ktime_get(),
|
||||
apic->lapic_timer.target_expiration)) {
|
||||
apic_timer_expired(apic);
|
||||
apic_timer_expired(apic, false);
|
||||
|
||||
if (apic_lvtt_oneshot(apic))
|
||||
return;
|
||||
@ -1760,7 +1802,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
|
||||
bool expired;
|
||||
|
||||
WARN_ON(preemptible());
|
||||
if (!kvm_x86_ops.set_hv_timer)
|
||||
if (!kvm_can_use_hv_timer(vcpu))
|
||||
return false;
|
||||
|
||||
if (!ktimer->tscdeadline)
|
||||
@ -1785,7 +1827,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
|
||||
if (atomic_read(&ktimer->pending)) {
|
||||
cancel_hv_timer(apic);
|
||||
} else if (expired) {
|
||||
apic_timer_expired(apic);
|
||||
apic_timer_expired(apic, false);
|
||||
cancel_hv_timer(apic);
|
||||
}
|
||||
}
|
||||
@ -1833,9 +1875,9 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
|
||||
/* If the preempt notifier has already run, it also called apic_timer_expired */
|
||||
if (!apic->lapic_timer.hv_timer_in_use)
|
||||
goto out;
|
||||
WARN_ON(swait_active(&vcpu->wq));
|
||||
WARN_ON(rcuwait_active(&vcpu->wait));
|
||||
cancel_hv_timer(apic);
|
||||
apic_timer_expired(apic);
|
||||
apic_timer_expired(apic, false);
|
||||
|
||||
if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
|
||||
advance_periodic_target_expiration(apic);
|
||||
@ -1872,17 +1914,22 @@ void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
|
||||
restart_apic_timer(apic);
|
||||
}
|
||||
|
||||
static void start_apic_timer(struct kvm_lapic *apic)
|
||||
static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
|
||||
{
|
||||
atomic_set(&apic->lapic_timer.pending, 0);
|
||||
|
||||
if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
|
||||
&& !set_target_expiration(apic))
|
||||
&& !set_target_expiration(apic, count_reg))
|
||||
return;
|
||||
|
||||
restart_apic_timer(apic);
|
||||
}
|
||||
|
||||
static void start_apic_timer(struct kvm_lapic *apic)
|
||||
{
|
||||
__start_apic_timer(apic, APIC_TMICT);
|
||||
}
|
||||
|
||||
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
|
||||
{
|
||||
bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
|
||||
@ -2336,7 +2383,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
|
||||
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
|
||||
struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
|
||||
|
||||
apic_timer_expired(apic);
|
||||
apic_timer_expired(apic, true);
|
||||
|
||||
if (lapic_is_periodic(apic)) {
|
||||
advance_periodic_target_expiration(apic);
|
||||
@ -2493,6 +2540,14 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
|
||||
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
|
||||
{
|
||||
memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
|
||||
|
||||
/*
|
||||
* Get calculated timer current count for remaining timer period (if
|
||||
* any) and store it in the returned register set.
|
||||
*/
|
||||
__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
|
||||
__apic_read(vcpu->arch.apic, APIC_TMCCT));
|
||||
|
||||
return kvm_apic_state_fixup(vcpu, s, false);
|
||||
}
|
||||
|
||||
@ -2520,7 +2575,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
|
||||
apic_update_lvtt(apic);
|
||||
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
|
||||
update_divide_count(apic);
|
||||
start_apic_timer(apic);
|
||||
__start_apic_timer(apic, APIC_TMCCT);
|
||||
kvm_apic_update_apicv(vcpu);
|
||||
apic->highest_isr_cache = -1;
|
||||
if (vcpu->arch.apicv_active) {
|
||||
|
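Taken together, the lapic.c changes teach save/restore to preserve the *current* timer count: kvm_apic_get_state() now writes the computed TMCCT into the returned register set, and kvm_apic_set_state() restarts the timer from APIC_TMCCT instead of APIC_TMICT, clamped to the full period by set_target_expiration(). A small worked example of the arithmetic, using the kernel's APIC_BUS_CYCLE_NS of 1:

#include <stdint.h>

/* Mirrors tmict_to_ns() above: counts tick at the APIC bus period
 * scaled by the divide configuration register. */
#define APIC_BUS_CYCLE_NS 1ULL

static uint64_t count_to_ns(uint32_t count, uint32_t divide_count)
{
	return (uint64_t)count * APIC_BUS_CYCLE_NS * divide_count;
}

/*
 * Example: TMICT=100000 with divider 16 gives a 1.6 ms period. If the
 * vCPU state is saved when TMCCT=25000, the destination re-arms the
 * timer 25000*16 = 400000 ns out instead of restarting the full
 * 1.6 ms, so the guest does not observe the timer jumping backwards.
 */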
@@ -161,9 +161,14 @@ static inline u32 kvm_lapic_get_reg(struct kvm_lapic *apic, int reg_off)
 	return *((u32 *) (apic->regs + reg_off));
 }

+static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
+{
+	*((u32 *) (regs + reg_off)) = val;
+}
+
 static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
 {
-	*((u32 *) (apic->regs + reg_off)) = val;
+	__kvm_lapic_set_reg(apic->regs, reg_off, val);
 }

 extern struct static_key kvm_no_apic_vcpu;

@@ -245,7 +250,7 @@ void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
-bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu);
+bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu);

 static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
 {
@@ -51,13 +51,13 @@ static inline u64 rsvd_bits(int s, int e)
 	return ((1ULL << (e - s + 1)) - 1) << s;
 }

-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);

 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
(File diff suppressed because it is too large.)
@@ -61,7 +61,7 @@ static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
 {
 	int index, val;

-	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);

 	val = slot->arch.gfn_track[mode][index];

@@ -151,7 +151,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
 	if (!slot)
 		return false;

-	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
 	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
 }
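From here through the following hunks runs the MMU-wide rename of the KVM-private PT_*_LEVEL names to the host's PG_LEVEL_* names. The values are unchanged; the naming now matches arch/x86's own page-level enum, which also makes comparisons such as `walker->level > PG_LEVEL_4K` read naturally:

/* The correspondence, as in the x86 pgtable headers (values unchanged): */
enum pg_level {
	PG_LEVEL_NONE,	/* 0 */
	PG_LEVEL_4K,	/* 1, was PT_PAGE_TABLE_LEVEL */
	PG_LEVEL_2M,	/* 2, was PT_DIRECTORY_LEVEL  */
	PG_LEVEL_1G,	/* 3, was PT_PDPE_LEVEL       */
	PG_LEVEL_512G,	/* 4 */
};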
@@ -75,7 +75,7 @@
 #define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)

 #define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
-#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
+#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)

 /*
  * The guest_walker structure emulates the behavior of the hardware page

@@ -198,7 +198,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 	    !(gpte & PT_GUEST_ACCESSED_MASK))
 		goto no_present;

-	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
 		goto no_present;

 	return false;

@@ -436,7 +436,7 @@ retry_walk:
 	gfn = gpte_to_gfn_lvl(pte, walker->level);
 	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

-	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
+	if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
 		gfn += pse36_gfn_delta(pte);

 	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);

@@ -552,7 +552,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * we call mmu_set_spte() with host_writable = true because
 	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
-	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
+	mmu_set_spte(vcpu, spte, pte_access, 0, PG_LEVEL_4K, gfn, pfn,
 		     true, true);

 	kvm_release_pfn_clean(pfn);

@@ -575,7 +575,7 @@ static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
 	u64 mask;
 	int r, index;

-	if (level == PT_PAGE_TABLE_LEVEL) {
+	if (level == PG_LEVEL_4K) {
 		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
 		base_gpa = pte_gpa & ~mask;
 		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

@@ -600,7 +600,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,

 	sp = page_header(__pa(sptep));

-	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+	if (sp->role.level > PG_LEVEL_4K)
 		return;

 	if (sp->role.direct)

@@ -812,7 +812,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
 		if (!prefault)
-			inject_page_fault(vcpu, &walker.fault);
+			kvm_inject_emulated_page_fault(vcpu, &walker.fault);

 		return RET_PF_RETRY;
 	}

@@ -828,7 +828,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 		      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

 	if (lpage_disallowed || is_self_change_mapping)
-		max_level = PT_PAGE_TABLE_LEVEL;
+		max_level = PG_LEVEL_4K;
 	else
 		max_level = walker.level;

@@ -884,7 +884,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
 {
 	int offset = 0;

-	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
+	WARN_ON(sp->role.level != PG_LEVEL_4K);

 	if (PTTYPE == 32)
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;

@@ -1070,7 +1070,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

 		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
-					 pte_access, PT_PAGE_TABLE_LEVEL,
+					 pte_access, PG_LEVEL_4K,
 					 gfn, spte_to_pfn(sp->spt[i]),
 					 true, false, host_writable);
 	}
@@ -100,7 +100,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 	sp = page_header(__pa(sptep));

 	if (sp->unsync) {
-		if (level != PT_PAGE_TABLE_LEVEL) {
+		if (level != PG_LEVEL_4K) {
 			audit_printk(vcpu->kvm, "unsync sp: %p "
 				     "level = %d\n", sp, level);
 			return;

@@ -176,7 +176,7 @@ static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	int i;

-	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+	if (sp->role.level != PG_LEVEL_4K)
 		return;

 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {

@@ -200,7 +200,7 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)

 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, sp->gfn);
-	rmap_head = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
+	rmap_head = __gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot);

 	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		if (is_writable_pte(*sptep))
@@ -397,9 +397,9 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
 }

-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr, data);
+	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
 }

 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -32,7 +32,7 @@ struct kvm_pmu_ops {
 	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
 	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
 	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	void (*refresh)(struct kvm_vcpu *vcpu);
 	void (*init)(struct kvm_vcpu *vcpu);

@@ -147,7 +147,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
 void kvm_pmu_reset(struct kvm_vcpu *vcpu);
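Passing the whole struct msr_data into kvm_pmu_get_msr() matters for the full-width counter support in this pull: the backend can now see whether an access is host-initiated (state save/restore, debugging) or a guest RDMSR and treat the two differently. A standalone toy model of the pattern; the types and names here are illustrative, not the in-tree vmx/amd implementation:

#include <stdbool.h>
#include <stdint.h>

/* Toy model: everything the handler needs travels in one struct,
 * including host_initiated. */
struct toy_msr_data {
	bool     host_initiated; /* access from ioctl, not guest RDMSR */
	uint32_t index;
	uint64_t data;
};

static bool guest_has_full_width_counters; /* stand-in for a CPUID check */
static uint64_t counters[8];

static int toy_pmu_get_msr(struct toy_msr_data *msr_info)
{
	if (msr_info->index >= 8)
		return 1;
	/* Host-initiated accesses may read the full-width counter even
	 * when the guest's CPUID hides it; with the old (vcpu, msr,
	 * *data) signature this distinction was impossible to express. */
	if (!msr_info->host_initiated && !guest_has_full_width_counters)
		return 1;
	msr_info->data = counters[msr_info->index];
	return 0;
}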
(Some files were not shown because too many files have changed in this diff.)