locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests
Optimize spinlock and mutex busy-loops by providing a vcpu_is_preempted(cpu) function on KVM and Xen platforms. Extend the pv_lock_ops interface accordingly and implement the callbacks on KVM and Xen. Signed-off-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> [ Translated to English. ] Acked-by: Paolo Bonzini <pbonzini@redhat.com> Cc: David.Laight@ACULAB.COM Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: benh@kernel.crashing.org Cc: boqun.feng@gmail.com Cc: borntraeger@de.ibm.com Cc: bsingharora@gmail.com Cc: dave@stgolabs.net Cc: jgross@suse.com Cc: kernellwp@gmail.com Cc: konrad.wilk@oracle.com Cc: linuxppc-dev@lists.ozlabs.org Cc: mpe@ellerman.id.au Cc: paulmck@linux.vnet.ibm.com Cc: paulus@samba.org Cc: rkrcmar@redhat.com Cc: virtualization@lists.linux-foundation.org Cc: will.deacon@arm.com Cc: xen-devel-request@lists.xenproject.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/1478077718-37424-7-git-send-email-xinhui.pan@linux.vnet.ibm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
760928c0da
commit
446f3dc8cc
@ -310,6 +310,8 @@ struct pv_lock_ops {
|
|||||||
|
|
||||||
void (*wait)(u8 *ptr, u8 val);
|
void (*wait)(u8 *ptr, u8 val);
|
||||||
void (*kick)(int cpu);
|
void (*kick)(int cpu);
|
||||||
|
|
||||||
|
bool (*vcpu_is_preempted)(int cpu);
|
||||||
};
|
};
|
||||||
|
|
||||||
/* This contains all the paravirt structures: we get a convenient
|
/* This contains all the paravirt structures: we get a convenient
|
||||||
|
@ -26,6 +26,14 @@
|
|||||||
extern struct static_key paravirt_ticketlocks_enabled;
|
extern struct static_key paravirt_ticketlocks_enabled;
|
||||||
static __always_inline bool static_key_false(struct static_key *key);
|
static __always_inline bool static_key_false(struct static_key *key);
|
||||||
|
|
||||||
|
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * vcpu_is_preempted() - report whether the given vCPU is currently
 * scheduled out by the hypervisor.
 *
 * Dispatches through the pv_lock_ops paravirt table, so KVM/Xen can
 * install their own callback; the native fallback always reports false.
 * The #define advertises to generic code that an arch override exists.
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return pv_lock_ops.vcpu_is_preempted(cpu);
}
#endif
|
||||||
|
|
||||||
#include <asm/qspinlock.h>
|
#include <asm/qspinlock.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
|
|||||||
__raw_callee_save___native_queued_spin_unlock;
|
__raw_callee_save___native_queued_spin_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Native (bare-metal) callback for pv_lock_ops.vcpu_is_preempted:
 * without a hypervisor a physical CPU is never preempted, so always
 * report "not preempted".
 *
 * Fix: the original returned the integer literal 0 from a bool-typed
 * function; use the proper boolean constant 'false' instead.
 */
static bool native_vcpu_is_preempted(int cpu)
{
	return false;
}
||||||
/*
 * Default (native) paravirt lock operations. Hypervisor guests
 * (KVM, Xen) patch these callbacks at boot with their own versions.
 * Only meaningful on SMP; on UP the table stays empty.
 */
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath	= native_queued_spin_lock_slowpath,
	.queued_spin_unlock		= PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait				= paravirt_nop,
	.kick				= paravirt_nop,
	.vcpu_is_preempted		= native_vcpu_is_preempted,
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
|
Loading…
Reference in New Issue
Block a user