KVM: mark requests that need synchronization

kvm_make_all_cpus_request() provides a synchronization that waits until all
kicked VCPUs have acknowledged the kick.  This is important for
KVM_REQ_MMU_RELOAD, as it prevents the paging structures from being freed
while a lockless page-table walk is still in progress.

This patch adds the synchronization property to all requests that are
currently used with kvm_make_all_cpus_request(), so that the existing
behavior is preserved and only the new framework is introduced.  Removing
it from requests where it is not necessary is left for future patches.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 7a97cec26b (parent 178f02ffaf)
Committed by Paolo Bonzini on 2017-04-27 14:33:43 +02:00
5 changed files with 32 additions and 12 deletions
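For readers skimming the diff, the sketch below is a stand-alone userspace C
program (not kernel code) that models the request encoding this series uses:
bits 0-7 carry the request number, BIT(8) is KVM_REQUEST_NO_WAKEUP, and the
new BIT(9) is KVM_REQUEST_WAIT, which is what kvm_request_needs_ipi() keys
off.  The constants and the helper mirror the patch; the simplified GENMASK
and the main() harness are illustrative assumptions only.

    /*
     * Stand-alone sketch of the request encoding and IPI decision.
     * Not kernel code; main() is a hypothetical test harness.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(n)                 (1u << (n))
    #define KVM_REQUEST_MASK       0xffu          /* stands in for GENMASK(7,0) */
    #define KVM_REQUEST_NO_WAKEUP  BIT(8)
    #define KVM_REQUEST_WAIT       BIT(9)

    #define KVM_REQ_TLB_FLUSH   (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
    #define KVM_REQ_MMU_RELOAD  (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
    #define KVM_REQ_UNHALT      3

    enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, READING_SHADOW_PAGE_TABLES };

    /* Same shape as the kernel helper: does this request need an IPI now? */
    static bool request_needs_ipi(enum vcpu_mode mode, unsigned int req)
    {
            if (req & KVM_REQUEST_WAIT)
                    /* must wait until the VCPU leaves any in-guest state */
                    return mode != OUTSIDE_GUEST_MODE;
            /* otherwise only a currently running VCPU needs a kick */
            return mode == IN_GUEST_MODE;
    }

    int main(void)
    {
            printf("MMU_RELOAD while reading shadow page tables -> IPI? %d\n",
                   request_needs_ipi(READING_SHADOW_PAGE_TABLES, KVM_REQ_MMU_RELOAD));
            printf("UNHALT while reading shadow page tables     -> IPI? %d\n",
                   request_needs_ipi(READING_SHADOW_PAGE_TABLES, KVM_REQ_UNHALT));
            printf("request number of TLB_FLUSH = %u\n",
                   KVM_REQ_TLB_FLUSH & KVM_REQUEST_MASK);
            return 0;
    }

Compiled with any C99 compiler, it prints that the WAIT-flagged request still
needs an IPI while the VCPU is in READING_SHADOW_PAGE_TABLES, whereas the
plain request does not.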

arch/arm/include/asm/kvm_host.h
@@ -44,7 +44,7 @@
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 #endif
-#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);

arch/arm64/include/asm/kvm_host.h
@@ -41,7 +41,7 @@
 #define KVM_VCPU_MAX_FEATURES 4
-#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);

arch/x86/include/asm/kvm_host.h
@@ -61,10 +61,10 @@
 #define KVM_REQ_PMI 19
 #define KVM_REQ_SMI 20
 #define KVM_REQ_MASTERCLOCK_UPDATE 21
-#define KVM_REQ_MCLOCK_INPROGRESS (22 | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_SCAN_IOAPIC (23 | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_MCLOCK_INPROGRESS (22 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_SCAN_IOAPIC (23 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_GLOBAL_CLOCK_UPDATE 24
-#define KVM_REQ_APIC_PAGE_RELOAD (25 | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_APIC_PAGE_RELOAD (25 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_HV_CRASH 26
 #define KVM_REQ_IOAPIC_EOI_EXIT 27
 #define KVM_REQ_HV_RESET 28

include/linux/kvm_host.h
@@ -117,14 +117,15 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_MASK      GENMASK(7,0)
 #define KVM_REQUEST_NO_WAKEUP BIT(8)
+#define KVM_REQUEST_WAIT      BIT(9)
 /*
  * Architecture-independent vcpu->requests bit members
  * Bits 4-7 are reserved for more arch-independent bits.
  */
-#define KVM_REQ_TLB_FLUSH     (0 | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_MMU_RELOAD    (1 | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT        3
+#define KVM_REQ_TLB_FLUSH     (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_MMU_RELOAD    (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_PENDING_TIMER 2
+#define KVM_REQ_UNHALT        3
 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1

virt/kvm/kvm_main.c
@@ -165,6 +165,24 @@ void vcpu_put(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(vcpu_put);
 
+/* TODO: merge with kvm_arch_vcpu_should_kick */
+static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
+{
+        int mode = kvm_vcpu_exiting_guest_mode(vcpu);
+
+        /*
+         * We need to wait for the VCPU to reenable interrupts and get out of
+         * READING_SHADOW_PAGE_TABLES mode.
+         */
+        if (req & KVM_REQUEST_WAIT)
+                return mode != OUTSIDE_GUEST_MODE;
+
+        /*
+         * Need to kick a running VCPU, but otherwise there is nothing to do.
+         */
+        return mode == IN_GUEST_MODE;
+}
+
 static void ack_flush(void *_completed)
 {
 }
@@ -174,6 +192,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
+       bool wait = req & KVM_REQUEST_WAIT;
        struct kvm_vcpu *vcpu;
 
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
@@ -187,13 +206,13 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
                        continue;
 
                if (cpus != NULL && cpu != -1 && cpu != me &&
-                   kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
+                   kvm_request_needs_ipi(vcpu, req))
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
-               smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+               smp_call_function_many(cpu_online_mask, ack_flush, NULL, wait);
        else if (!cpumask_empty(cpus))
-               smp_call_function_many(cpus, ack_flush, NULL, 1);
+               smp_call_function_many(cpus, ack_flush, NULL, wait);
        else
                called = false;
        put_cpu();
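Note on the last hunk: with KVM_REQUEST_WAIT set in the request, wait is true
and smp_call_function_many() does not return until ack_flush() has completed
on every kicked CPU, which is the synchronization described in the commit
message.  A request without the flag would now skip that wait, which is why
this patch adds the flag to every request currently used with
kvm_make_all_cpus_request().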