KVM: Kill off the old hva-based MMU notifier callbacks

Yank out the hva-based MMU notifier APIs now that all architectures that
use the notifiers have moved to the gfn-based APIs.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210402005658.3024832-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Sean Christopherson <seanjc@google.com>
Date:      2021-04-01 17:56:54 -07:00
Committer: Paolo Bonzini
Commit:    b4c5936c47 (parent: b1c5356e87)

6 changed files with 0 additions and 97 deletions
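
For orientation before the per-file hunks: the commit deletes the transitional opt-in macro KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS and the hva-based prototypes, leaving only the gfn-based hooks. Below is a minimal side-by-side sketch of the two callback shapes, assembled from the declarations removed and kept in the include/linux/kvm_host.h hunk further down (the kvm_gfn_range struct is truncated to the fields visible in that hunk):

	/* Old, hva-based hooks (removed): each architecture translated the
	 * host virtual address range into guest frames on its own.
	 */
	int kvm_unmap_hva_range(struct kvm *kvm,
				unsigned long start, unsigned long end, unsigned flags);
	int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
	int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

	/* New, gfn-based hooks (kept): common code resolves hva -> memslot ->
	 * gfn once and hands each architecture a pre-translated range; the
	 * bool return tells common code whether a remote TLB flush is needed.
	 */
	struct kvm_gfn_range {
		struct kvm_memory_slot *slot;
		gfn_t start;
		gfn_t end;
		/* ... */
	};
	bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
	bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);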

arch/arm64/include/asm/kvm_host.h

@@ -586,7 +586,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);

arch/mips/include/asm/kvm_host.h

@@ -960,7 +960,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 						   bool write);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 
 /* Emulation */
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

arch/powerpc/include/asm/kvm_host.h

@@ -55,7 +55,6 @@
 #include <linux/mmu_notifier.h>
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13

arch/x86/include/asm/kvm_host.h

@@ -1727,7 +1727,6 @@ asmlinkage void kvm_spurious_fault(void);
 	_ASM_EXTABLE(666b, 667b)
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);

include/linux/kvm_host.h

@@ -219,7 +219,6 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 struct kvm_gfn_range {
 	struct kvm_memory_slot *slot;
 	gfn_t start;
@@ -231,13 +230,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-#else
-int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end, unsigned flags);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */
 #endif
 
 enum {
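
For contrast with the prototypes above, here is a hypothetical architecture-side implementation of one surviving hook; kvm_arch_zap_gfn_range() is an invented placeholder, not a helper from this commit:

	/* Illustrative sketch only: an arch backend for the gfn-based unmap
	 * hook. Returning true tells common code that mappings were zapped
	 * and a remote TLB flush is required.
	 */
	bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
	{
		/* A real backend walks its stage-2 / shadow page tables for
		 * range->slot and unmaps gfns in [range->start, range->end).
		 */
		return kvm_arch_zap_gfn_range(kvm, range->slot, range->start,
					      range->end);
	}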

virt/kvm/kvm_main.c

@@ -451,8 +451,6 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-
 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
 struct kvm_hva_range {
@@ -564,8 +562,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
 	return ret;
 }
 
-#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long address,
@@ -573,9 +569,6 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	int idx;
-#endif
 
 	trace_kvm_set_spte_hva(address);
 
 	/*
@@ -585,26 +578,13 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	 */
 	WARN_ON_ONCE(!kvm->mmu_notifier_count);
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-
-	KVM_MMU_LOCK(kvm);
-
-	if (kvm_set_spte_hva(kvm, address, pte))
-		kvm_flush_remote_tlbs(kvm);
-
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-#endif
 }
 
 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	const struct kvm_hva_range hva_range = {
 		.start		= range->start,
 		.end		= range->end,
@@ -613,16 +593,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		.flush_on_ret	= true,
 		.may_block	= mmu_notifier_range_blockable(range),
 	};
-#else
-	int need_tlb_flush = 0, idx;
-#endif
 
 	trace_kvm_unmap_hva_range(range->start, range->end);
 
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	idx = srcu_read_lock(&kvm->srcu);
-#endif
-
 	KVM_MMU_LOCK(kvm);
 	/*
 	 * The count increase must become visible at unlock time as no
@@ -649,20 +622,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 			max(kvm->mmu_notifier_range_end, range->end);
 	}
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	__kvm_handle_hva_range(kvm, &hva_range);
-#else
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
-					     range->flags);
-	/* we've to flush the tlb before the pages can be freed */
-	if (need_tlb_flush || kvm->tlbs_dirty)
-		kvm_flush_remote_tlbs(kvm);
-#endif
 
 	KVM_MMU_UNLOCK(kvm);
 
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	srcu_read_unlock(&kvm->srcu, idx);
-#endif
-
 	return 0;
 }
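
Note how the deleted #else branches all repeated the same srcu_read_lock()/KVM_MMU_LOCK()/flush/unlock boilerplate; with the gfn-based API that sequence lives once inside __kvm_handle_hva_range(). A sketch of the resulting caller pattern, using the initializer fields visible in the hunk above (the .pte and .handler members are assumptions from surrounding context, not shown in this hunk):

	/* Sketch: describe the operation declaratively and let common code
	 * do the locking, memslot iteration, and conditional TLB flush.
	 */
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),		/* assumed field */
		.handler	= kvm_unmap_gfn_range,	/* assumed field */
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	__kvm_handle_hva_range(kvm, &hva_range);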
@@ -696,27 +658,9 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 					unsigned long start,
 					unsigned long end)
 {
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-#endif
-
 	trace_kvm_age_hva(start, end);
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-
-	KVM_MMU_LOCK(kvm);
-
-	young = kvm_age_hva(kvm, start, end);
-	if (young)
-		kvm_flush_remote_tlbs(kvm);
-
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
 }
 
 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -724,11 +668,6 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 					unsigned long start,
 					unsigned long end)
 {
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-#endif
-
 	trace_kvm_age_hva(start, end);
 
 	/*
@@ -744,41 +683,17 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 	 * cadence. If we find this inaccurate, we might come up with a
 	 * more sophisticated heuristic later.
 	 */
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-
-	KVM_MMU_LOCK(kvm);
-	young = kvm_age_hva(kvm, start, end);
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
 }
 
 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 				       struct mm_struct *mm,
 				       unsigned long address)
 {
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-#endif
-
 	trace_kvm_test_age_hva(address);
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
 					     kvm_test_age_gfn);
-#else
-	idx = srcu_read_lock(&kvm->srcu);
-
-	KVM_MMU_LOCK(kvm);
-	young = kvm_test_age_hva(kvm, address);
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
 }
 
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,