KVM: x86: Introduce KVM_REQ_DIRTY_RING_SOFT_FULL
The VCPU isn't expected to be runnable when the dirty ring becomes soft
full, until the dirty pages are harvested and the dirty ring is reset
from userspace. So there is a check in each guest entry to see whether
the dirty ring is soft full or not, and the VCPU is stopped from running
if it is. A similar check will be needed when the feature is supported
on ARM64. As Marc Zyngier suggested, a new event avoids the pointless
overhead of checking the size of the dirty ring
('vcpu->kvm->dirty_ring_size') on every guest entry.

Add KVM_REQ_DIRTY_RING_SOFT_FULL. The event is raised when the dirty
ring becomes soft full in kvm_dirty_ring_push(), and is only cleared in
the check, done in the newly added helper kvm_dirty_ring_check_request().
Since the VCPU is not runnable when the dirty ring becomes soft full,
the KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent the VCPU
from running until the dirty pages are harvested and the dirty ring is
reset by userspace.

kvm_dirty_ring_soft_full() becomes a private function with the newly
added helper kvm_dirty_ring_check_request(). While at it, the alignment
of the various event definitions in kvm_host.h is changed to tab
characters. In order to avoid using 'container_of()', the argument
@ring is replaced by @vcpu in kvm_dirty_ring_push().

Link: https://lore.kernel.org/kvmarm/87lerkwtm5.wl-maz@kernel.org
Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110104914.31280-2-gshan@redhat.com
commit cf87ac739e
parent 590925a178
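The "harvested and the dirty ring is reset by userspace" step referred to
above works roughly as follows: the VMM mmap()s each vCPU's ring of
struct kvm_dirty_gfn entries, collects entries whose KVM_DIRTY_GFN_F_DIRTY
flag is set, marks them with KVM_DIRTY_GFN_F_RESET, and finally issues the
KVM_RESET_DIRTY_RINGS ioctl on the VM fd. A minimal sketch of that loop is
shown below; it is not part of this patch, the helper names are
illustrative, the flags/ioctls are the documented KVM dirty-ring UAPI, and
VM setup, memory-barrier subtleties and error handling are omitted.

/*
 * Userspace-side sketch of the dirty-ring harvest/reset protocol.
 * Not part of this patch; helper names are illustrative.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/*
 * 'ring' is the array of struct kvm_dirty_gfn mmap()ed from the vCPU fd
 * (typically at KVM_DIRTY_LOG_PAGE_OFFSET * page_size); 'nents' is its
 * entry count and 'fetch_index' is the VMM's private read cursor.
 */
static void harvest_one_ring(struct kvm_dirty_gfn *ring, uint32_t nents,
                             uint32_t *fetch_index)
{
        for (;;) {
                struct kvm_dirty_gfn *e = &ring[*fetch_index % nents];
                uint32_t flags = __atomic_load_n(&e->flags, __ATOMIC_ACQUIRE);

                if (!(flags & KVM_DIRTY_GFN_F_DIRTY))
                        break;  /* no more harvestable entries */

                /* ... record (e->slot, e->offset) in the VMM's own dirty log ... */

                /* Hand the entry back so KVM_RESET_DIRTY_RINGS can recycle it. */
                __atomic_store_n(&e->flags, flags | KVM_DIRTY_GFN_F_RESET,
                                 __ATOMIC_RELEASE);
                (*fetch_index)++;
        }
}

/*
 * After KVM_RUN returns with exit_reason == KVM_EXIT_DIRTY_RING_FULL,
 * harvest every vCPU's ring and reset them in one go; only this reset
 * lets a soft-full vCPU enter the guest again.
 */
static void handle_dirty_ring_full(int vm_fd /* , per-vCPU rings ... */)
{
        /* harvest_one_ring(...) for each vCPU, then: */
        ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
}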
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10499,20 +10499,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         bool req_immediate_exit = false;
 
-        /* Forbid vmenter if vcpu dirty ring is soft-full */
-        if (unlikely(vcpu->kvm->dirty_ring_size &&
-            kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) {
-                vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
-                trace_kvm_dirty_ring_exit(vcpu);
-                r = 0;
-                goto out;
-        }
-
         if (kvm_request_pending(vcpu)) {
                 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
                         r = -EIO;
                         goto out;
                 }
+
+                if (kvm_dirty_ring_check_request(vcpu)) {
+                        r = 0;
+                        goto out;
+                }
+
                 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
                         if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                 r = 0;
                                 goto out;
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -49,7 +49,7 @@ static inline int kvm_dirty_ring_reset(struct kvm *kvm,
         return 0;
 }
 
-static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring,
+static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
                                        u32 slot, u64 offset)
 {
 }
@@ -64,11 +64,6 @@ static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
 {
 }
 
-static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
-{
-        return true;
-}
-
 #else /* CONFIG_HAVE_KVM_DIRTY_RING */
 
 u32 kvm_dirty_ring_get_rsvd_entries(void);
@@ -84,13 +79,14 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
  * returns =0: successfully pushed
  *         <0: unable to push, need to wait
  */
-void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset);
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
 
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
+
 /* for use in vm_operations_struct */
 struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
 
 void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);
 
 #endif /* CONFIG_HAVE_KVM_DIRTY_RING */
 
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -156,6 +156,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_TLB_FLUSH               (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_VM_DEAD                 (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK                 2
+#define KVM_REQ_DIRTY_RING_SOFT_FULL    3
 #define KVM_REQUEST_ARCH_BASE           8
 
 /*
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -26,7 +26,7 @@ static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
         return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
 }
 
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
+static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
 {
         return kvm_dirty_ring_used(ring) >= ring->soft_limit;
 }
@@ -142,13 +142,19 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
 
         kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
 
+        /*
+         * The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
+         * by the VCPU thread next time when it enters the guest.
+         */
+
         trace_kvm_dirty_ring_reset(ring);
 
         return count;
 }
 
-void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
 {
+        struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
         struct kvm_dirty_gfn *entry;
 
         /* It should never get full */
@@ -166,6 +172,28 @@ void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
         kvm_dirty_gfn_set_dirtied(entry);
         ring->dirty_index++;
         trace_kvm_dirty_ring_push(ring, slot, offset);
+
+        if (kvm_dirty_ring_soft_full(ring))
+                kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
+}
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
+{
+        /*
+         * The VCPU isn't runnable when the dirty ring becomes soft full.
+         * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
+         * the VCPU from running until the dirty pages are harvested and
+         * the dirty ring is reset by userspace.
+         */
+        if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
+            kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+                kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
+                vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
+                trace_kvm_dirty_ring_exit(vcpu);
+                return true;
+        }
+
+        return false;
 }
 
 struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
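The interplay above is the whole trick: kvm_dirty_ring_push() raises the
request once the ring goes soft full, and kvm_dirty_ring_check_request()
re-raises it for as long as the ring is still soft full, so the vCPU keeps
exiting to userspace until KVM_RESET_DIRTY_RINGS drains the ring. A tiny
self-contained model of that flow is sketched below in plain C; the types
and helpers are simplified stand-ins for the kernel's per-vCPU request
bitmap, not kernel code.

/* Toy model of the request flow; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_DIRTY_RING_SOFT_FULL 3

struct toy_vcpu {
        unsigned long requests;         /* stands in for vcpu->requests */
        unsigned int ring_used;
        unsigned int soft_limit;
};

static void make_request(struct toy_vcpu *v, int req)
{
        v->requests |= 1UL << req;
}

/* Test-and-clear, like kvm_check_request(). */
static bool check_request(struct toy_vcpu *v, int req)
{
        bool set = v->requests & (1UL << req);

        v->requests &= ~(1UL << req);
        return set;
}

static bool ring_soft_full(struct toy_vcpu *v)
{
        return v->ring_used >= v->soft_limit;
}

/*
 * Mirrors the shape of kvm_dirty_ring_check_request(): clear the bit,
 * but re-raise it while the ring is still soft full so the vCPU keeps
 * exiting to userspace.
 */
static bool dirty_ring_check_request(struct toy_vcpu *v)
{
        if (check_request(v, REQ_DIRTY_RING_SOFT_FULL) && ring_soft_full(v)) {
                make_request(v, REQ_DIRTY_RING_SOFT_FULL);
                return true;            /* -> KVM_EXIT_DIRTY_RING_FULL */
        }
        return false;
}

int main(void)
{
        struct toy_vcpu v = { .ring_used = 8, .soft_limit = 8 };

        make_request(&v, REQ_DIRTY_RING_SOFT_FULL);     /* as in push() */
        printf("full:  %d\n", dirty_ring_check_request(&v));   /* 1: blocked */
        printf("still: %d\n", dirty_ring_check_request(&v));   /* 1: re-raised */

        v.ring_used = 0;                                /* userspace reset */
        printf("reset: %d\n", dirty_ring_check_request(&v));   /* 0: may run */
        return 0;
}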
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3314,8 +3314,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
                 u32 slot = (memslot->as_id << 16) | memslot->id;
 
                 if (kvm->dirty_ring_size)
-                        kvm_dirty_ring_push(&vcpu->dirty_ring,
-                                            slot, rel_gfn);
+                        kvm_dirty_ring_push(vcpu, slot, rel_gfn);
                 else
                         set_bit_le(rel_gfn, memslot->dirty_bitmap);
         }