KVM: pfncache: remove KVM_GUEST_USES_PFN usage
As noted in [1], the KVM_GUEST_USES_PFN usage flag is never set by any caller of kvm_gpc_init(), and for good reason: the implementation is incomplete/broken. Nor is it clear that there will ever be a user of KVM_GUEST_USES_PFN, as coordinating vCPUs with mmu_notifier events is non-trivial.

Remove KVM_GUEST_USES_PFN and all related code, to avoid having to reason about broken code as __kvm_gpc_refresh() evolves; dropping KVM_GUEST_USES_PFN also makes the 'vcpu' argument redundant. Moreover, since all existing callers specify KVM_HOST_USES_PFN, the usage check in hva_to_pfn_retry(), and hence the 'usage' argument to kvm_gpc_init(), are also redundant.

[1] https://lore.kernel.org/all/ZQiR8IpqOZrOpzHC@google.com

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-6-paul@xen.org
[sean: explicitly call out that guest usage is incomplete]
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit a4bff3df51
parent 78b74638eb
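In short, the init call shrinks from four arguments to two. The before/after sketch below is illustrative only (the pv_time call site is taken from the first hunk); every remaining caller is a host-only user of the cache:

/*
 * Before this commit: callers had to pass a vCPU and a usage flag, yet
 * every in-tree caller passed KVM_HOST_USES_PFN (with a NULL or unused
 * vcpu), and the KVM_GUEST_USES_PFN path was incomplete/broken.
 */
kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);

/* After: host usage is the only (now implicit) mode. */
kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);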
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -12056,7 +12056,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs_avail = ~0;
 	vcpu->arch.regs_dirty = ~0;
 
-	kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
 
 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
@@ -2108,14 +2108,10 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
 
 	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
 
-	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
-		     KVM_HOST_USES_PFN);
-	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
-		     KVM_HOST_USES_PFN);
-	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
-		     KVM_HOST_USES_PFN);
-	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
-		     KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm);
+	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm);
+	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm);
+	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm);
 }
 
 void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@@ -2158,7 +2154,7 @@ void kvm_xen_init_vm(struct kvm *kvm)
 {
 	mutex_init(&kvm->arch.xen.xen_lock);
 	idr_init(&kvm->arch.xen.evtchn_ports);
-	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
+	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm);
 }
 
 void kvm_xen_destroy_vm(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -1319,21 +1319,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  *
  * @gpc:	   struct gfn_to_pfn_cache object.
  * @kvm:	   pointer to kvm instance.
- * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
- *		   invalidation.
- * @usage:	   indicates if the resulting host physical PFN is used while
- *		   the @vcpu is IN_GUEST_MODE (in which case invalidation of
- *		   the cache from MMU notifiers---but not for KVM memslot
- *		   changes!---will also force @vcpu to exit the guest and
- *		   refresh the cache); and/or if the PFN is used directly
- *		   by KVM (and thus needs a kernel virtual mapping).
  *
  * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
  * immutable attributes.  Note, the cache must be zero-allocated (or zeroed by
  * the caller before init).
  */
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
-		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
 
 /**
  * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
@@ -49,12 +49,6 @@ typedef u64 hfn_t;
 
 typedef hfn_t kvm_pfn_t;
 
-enum pfn_cache_usage {
-	KVM_GUEST_USES_PFN = BIT(0),
-	KVM_HOST_USES_PFN = BIT(1),
-	KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
-};
-
 struct gfn_to_hva_cache {
 	u64 generation;
 	gpa_t gpa;
@@ -69,13 +63,11 @@ struct gfn_to_pfn_cache {
 	unsigned long uhva;
 	struct kvm_memory_slot *memslot;
 	struct kvm *kvm;
-	struct kvm_vcpu *vcpu;
 	struct list_head list;
 	rwlock_t lock;
 	struct mutex refresh_lock;
 	void *khva;
 	kvm_pfn_t pfn;
-	enum pfn_cache_usage usage;
 	bool active;
 	bool valid;
 };
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
@@ -25,9 +25,7 @@
 void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 				       unsigned long end, bool may_block)
 {
-	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 	struct gfn_to_pfn_cache *gpc;
-	bool evict_vcpus = false;
 
 	spin_lock(&kvm->gpc_lock);
 	list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -37,43 +35,10 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
 		    gpc->uhva >= start && gpc->uhva < end) {
 			gpc->valid = false;
-
-			/*
-			 * If a guest vCPU could be using the physical address,
-			 * it needs to be forced out of guest mode.
-			 */
-			if (gpc->usage & KVM_GUEST_USES_PFN) {
-				if (!evict_vcpus) {
-					evict_vcpus = true;
-					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
-				}
-				__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
-			}
 		}
 		write_unlock_irq(&gpc->lock);
 	}
 	spin_unlock(&kvm->gpc_lock);
-
-	if (evict_vcpus) {
-		/*
-		 * KVM needs to ensure the vCPU is fully out of guest context
-		 * before allowing the invalidation to continue.
-		 */
-		unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
-		bool called;
-
-		/*
-		 * If the OOM reaper is active, then all vCPUs should have
-		 * been stopped already, so perform the request without
-		 * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
-		 */
-		if (!may_block)
-			req &= ~KVM_REQUEST_WAIT;
-
-		called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
-
-		WARN_ON_ONCE(called && !may_block);
-	}
 }
 
 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
@@ -206,16 +171,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 		 * pfn.  Note, kmap() and memremap() can both sleep, so this
 		 * too must be done outside of gpc->lock!
 		 */
-		if (gpc->usage & KVM_HOST_USES_PFN) {
-			if (new_pfn == gpc->pfn)
-				new_khva = old_khva;
-			else
-				new_khva = gpc_map(new_pfn);
-
-			if (!new_khva) {
-				kvm_release_pfn_clean(new_pfn);
-				goto out_error;
-			}
-		}
+		if (new_pfn == gpc->pfn)
+			new_khva = old_khva;
+		else
+			new_khva = gpc_map(new_pfn);
+
+		if (!new_khva) {
+			kvm_release_pfn_clean(new_pfn);
+			goto out_error;
+		}
 
 		write_lock_irq(&gpc->lock);
@@ -346,18 +309,12 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
 }
 
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
-		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
 {
-	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
-	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
-
 	rwlock_init(&gpc->lock);
 	mutex_init(&gpc->refresh_lock);
 
 	gpc->kvm = kvm;
-	gpc->vcpu = vcpu;
-	gpc->usage = usage;
 	gpc->pfn = KVM_PFN_ERR_FAULT;
 	gpc->uhva = KVM_HVA_ERR_BAD;
 }
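For readers unfamiliar with the pfncache API, a rough sketch of the host-side lifecycle that survives this patch is below. kvm_gpc_check() and kvm_gpc_refresh() are the real API (kvm_gpc_refresh() appears in the last hunk above), and gpc->lock is the rwlock initialized in kvm_gpc_init(); the wrapper function, its signature, and the exact locking details are assumptions for illustration, not code from this commit:

/*
 * Hypothetical helper showing the assumed host-side check/refresh
 * pattern for a gfn_to_pfn_cache that was set up earlier with
 * kvm_gpc_init(gpc, kvm) and kvm_gpc_activate(gpc, gpa, sizeof(u64)).
 * Illustrative only; not taken from this patch.
 */
static int example_read_cached_u64(struct gfn_to_pfn_cache *gpc, u64 *val)
{
	int ret = 0;

	read_lock(&gpc->lock);
	while (!kvm_gpc_check(gpc, sizeof(*val))) {
		read_unlock(&gpc->lock);

		/*
		 * The cache was invalidated, e.g. by the mmu_notifier path
		 * above, which after this commit simply clears gpc->valid;
		 * refreshing re-resolves the pfn and its kernel mapping.
		 */
		ret = kvm_gpc_refresh(gpc, sizeof(*val));
		if (ret)
			return ret;

		read_lock(&gpc->lock);
	}

	/* gpc->khva is the kernel mapping of the cached pfn. */
	*val = *(u64 *)gpc->khva;
	read_unlock(&gpc->lock);

	return ret;
}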