KVM: Remove memory alias support
As advertised in feature-removal-schedule.txt. Equivalent support is
provided by overlapping memory regions.

Signed-off-by: Avi Kivity <avi@redhat.com>
commit a1f4d39500
parent fc34531db3
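The replacement the commit message points at, overlapping memory regions, can be illustrated with a minimal userspace sketch: register the same host buffer in two memory slots via the standard KVM_SET_USER_MEMORY_REGION ioctl, so two guest-physical ranges reach the same pages. All slot numbers, addresses, and sizes below are illustrative, not taken from this commit, and error handling is omitted.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	/* One host backing store for the aliased contents (128 KiB, illustrative). */
	size_t size = 0x20000;
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Expose it at its "real" guest physical address... */
	struct kvm_userspace_memory_region real = {
		.slot = 0,
		.guest_phys_addr = 0x100000,		/* illustrative */
		.memory_size = size,
		.userspace_addr = (unsigned long)mem,
	};
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &real);

	/* ...and again at the legacy VGA window: both guest ranges now
	 * resolve to the same host pages, which is the effect the removed
	 * KVM_SET_MEMORY_ALIAS ioctl used to provide. */
	struct kvm_userspace_memory_region window = {
		.slot = 1,
		.guest_phys_addr = 0xa0000,
		.memory_size = size,
		.userspace_addr = (unsigned long)mem,
	};
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &window);
	return 0;
}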
@@ -538,17 +538,6 @@ Who:	Jan Kiszka <jan.kiszka@web.de>
 
 ----------------------------
 
-What:	KVM memory aliases support
-When:	July 2010
-Why:	Memory aliasing support is used for speeding up guest vga access
-	through the vga windows.
-
-	Modern userspace no longer uses this feature, so it's just bitrotted
-	code and can be removed with no impact.
-Who:	Avi Kivity <avi@redhat.com>
-
-----------------------------
-
 What:	xtime, wall_to_monotonic
 When:	2.6.36+
 Files:	kernel/time/timekeeping.c include/linux/time.h
@@ -226,17 +226,7 @@ Type: vm ioctl
 Parameters: struct kvm_memory_alias (in)
 Returns: 0 (success), -1 (error)
 
-struct kvm_memory_alias {
-	__u32 slot;  /* this has a different namespace than memory slots */
-	__u32 flags;
-	__u64 guest_phys_addr;
-	__u64 memory_size;
-	__u64 target_phys_addr;
-};
-
-Defines a guest physical address space region as an alias to another
-region.  Useful for aliased address, for example the VGA low memory
-window. Should not be used with userspace memory.
+This ioctl is obsolete and has been removed.
 
 4.9 KVM_RUN
 
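For reference, a call to the interface being dropped looked roughly as follows; this is a hypothetical sketch (vm_fd, slot, addresses, and size are illustrative), with field meanings taken from the removed documentation above.

/* Sketch of the removed interface; rejected once this commit lands. */
struct kvm_memory_alias alias = {
	.slot = 0,			/* alias-slot namespace, not a memory slot */
	.guest_phys_addr = 0xa0000,	/* where the alias appears (illustrative) */
	.memory_size = 0x10000,		/* 64 KiB, illustrative */
	.target_phys_addr = 0xc0000000,	/* region being aliased (illustrative) */
};
ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &alias);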
@@ -1946,11 +1946,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return vcpu->arch.timer_fired;
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
@@ -36,11 +36,6 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
@@ -723,11 +723,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 static int __init kvm_s390_init(void)
 {
 	int ret;
@@ -69,8 +69,6 @@
 
 #define IOPL_SHIFT 12
 
-#define KVM_ALIAS_SLOTS 4
-
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
 #define KVM_MMU_HASH_SHIFT 10
@@ -362,24 +360,7 @@ struct kvm_vcpu_arch {
 	u64 hv_vapic;
 };
 
-struct kvm_mem_alias {
-	gfn_t base_gfn;
-	unsigned long npages;
-	gfn_t target_gfn;
-#define KVM_ALIAS_INVALID   1UL
-	unsigned long flags;
-};
-
-#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
-
-struct kvm_mem_aliases {
-	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
-	int naliases;
-};
-
 struct kvm_arch {
-	struct kvm_mem_aliases *aliases;
-
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_alloc_mmu_pages;
@@ -655,8 +636,6 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
-
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -434,9 +434,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		write_count = slot_largepage_idx(gfn, slot, i);
@@ -450,8 +448,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		write_count = slot_largepage_idx(gfn, slot, i);
@@ -467,8 +464,7 @@ static int has_wrprotected_page(struct kvm *kvm,
 	struct kvm_memory_slot *slot;
 	int *largepage_idx;
 
-	gfn = unalias_gfn(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	if (slot) {
 		largepage_idx = slot_largepage_idx(gfn, slot, level);
 		return *largepage_idx;
@@ -521,7 +517,6 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 
 /*
  * Take gfn and return the reverse mapping to it.
- * Note: gfn must be unaliased before this function get called
  */
 
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
@@ -561,7 +556,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	if (!is_rmap_spte(*spte))
 		return count;
-	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
@@ -698,7 +692,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	u64 *spte;
 	int i, write_protected = 0;
 
-	gfn = unalias_gfn(kvm, gfn);
 	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
 
 	spte = rmap_next(kvm, rmapp, NULL);
@@ -885,7 +878,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 
-	gfn = unalias_gfn(vcpu->kvm, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
@@ -3510,8 +3502,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 		if (sp->unsync)
 			continue;
 
-		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
-		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
+		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 
 		spte = rmap_next(vcpu->kvm, rmapp, NULL);
@@ -576,7 +576,6 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
- * - Alias changes zap the entire shadow cache.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			    bool clear_unsync)
@@ -611,7 +610,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (unalias_gfn(vcpu->kvm, gfn) != sp->gfns[i] ||
+		if (gfn != sp->gfns[i] ||
 		    !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;
 
@@ -2740,115 +2740,6 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 	return kvm->arch.n_alloc_mmu_pages;
 }
 
-gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
-{
-	int i;
-	struct kvm_mem_alias *alias;
-	struct kvm_mem_aliases *aliases;
-
-	aliases = kvm_aliases(kvm);
-
-	for (i = 0; i < aliases->naliases; ++i) {
-		alias = &aliases->aliases[i];
-		if (alias->flags & KVM_ALIAS_INVALID)
-			continue;
-		if (gfn >= alias->base_gfn
-		    && gfn < alias->base_gfn + alias->npages)
-			return alias->target_gfn + gfn - alias->base_gfn;
-	}
-	return gfn;
-}
-
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	int i;
-	struct kvm_mem_alias *alias;
-	struct kvm_mem_aliases *aliases;
-
-	aliases = kvm_aliases(kvm);
-
-	for (i = 0; i < aliases->naliases; ++i) {
-		alias = &aliases->aliases[i];
-		if (gfn >= alias->base_gfn
-		    && gfn < alias->base_gfn + alias->npages)
-			return alias->target_gfn + gfn - alias->base_gfn;
-	}
-	return gfn;
-}
-
-/*
- * Set a new alias region.  Aliases map a portion of physical memory into
- * another portion.  This is useful for memory windows, for example the PC
- * VGA region.
- */
-static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
-					 struct kvm_memory_alias *alias)
-{
-	int r, n;
-	struct kvm_mem_alias *p;
-	struct kvm_mem_aliases *aliases, *old_aliases;
-
-	r = -EINVAL;
-	/* General sanity checks */
-	if (alias->memory_size & (PAGE_SIZE - 1))
-		goto out;
-	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
-		goto out;
-	if (alias->slot >= KVM_ALIAS_SLOTS)
-		goto out;
-	if (alias->guest_phys_addr + alias->memory_size
-	    < alias->guest_phys_addr)
-		goto out;
-	if (alias->target_phys_addr + alias->memory_size
-	    < alias->target_phys_addr)
-		goto out;
-
-	r = -ENOMEM;
-	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
-	if (!aliases)
-		goto out;
-
-	mutex_lock(&kvm->slots_lock);
-
-	/* invalidate any gfn reference in case of deletion/shrinking */
-	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
-	aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
-	old_aliases = kvm->arch.aliases;
-	rcu_assign_pointer(kvm->arch.aliases, aliases);
-	synchronize_srcu_expedited(&kvm->srcu);
-	kvm_mmu_zap_all(kvm);
-	kfree(old_aliases);
-
-	r = -ENOMEM;
-	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
-	if (!aliases)
-		goto out_unlock;
-
-	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
-
-	p = &aliases->aliases[alias->slot];
-	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
-	p->npages = alias->memory_size >> PAGE_SHIFT;
-	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
-	p->flags &= ~(KVM_ALIAS_INVALID);
-
-	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
-		if (aliases->aliases[n - 1].npages)
-			break;
-	aliases->naliases = n;
-
-	old_aliases = kvm->arch.aliases;
-	rcu_assign_pointer(kvm->arch.aliases, aliases);
-	synchronize_srcu_expedited(&kvm->srcu);
-	kfree(old_aliases);
-	r = 0;
-
-out_unlock:
-	mutex_unlock(&kvm->slots_lock);
-out:
-	return r;
-}
-
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 {
 	int r;
@@ -3056,7 +2947,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	union {
 		struct kvm_pit_state ps;
 		struct kvm_pit_state2 ps2;
-		struct kvm_memory_alias alias;
 		struct kvm_pit_config pit_config;
 	} u;
 
@@ -3101,14 +2991,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	case KVM_GET_NR_MMU_PAGES:
 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
 		break;
-	case KVM_SET_MEMORY_ALIAS:
-		r = -EFAULT;
-		if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
-			goto out;
-		r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
-		if (r)
-			goto out;
-		break;
 	case KVM_CREATE_IRQCHIP: {
 		struct kvm_pic *vpic;
 
@@ -5559,12 +5441,6 @@ struct kvm *kvm_arch_create_vm(void)
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
-	kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
-	if (!kvm->arch.aliases) {
-		kfree(kvm);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
@@ -5622,7 +5498,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	if (kvm->arch.ept_identity_pagetable)
 		put_page(kvm->arch.ept_identity_pagetable);
 	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm->arch.aliases);
 	kfree(kvm);
 }
 
@@ -65,13 +65,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
-static inline struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm)
-{
-	return rcu_dereference_check(kvm->arch.aliases,
-			srcu_read_lock_held(&kvm->srcu)
-			|| lockdep_is_held(&kvm->slots_lock));
-}
-
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 
@@ -619,6 +619,7 @@ struct kvm_clock_data {
  */
 #define KVM_CREATE_VCPU           _IO(KVMIO,  0x41)
 #define KVM_GET_DIRTY_LOG         _IOW(KVMIO, 0x42, struct kvm_dirty_log)
+/* KVM_SET_MEMORY_ALIAS is obsolete: */
 #define KVM_SET_MEMORY_ALIAS      _IOW(KVMIO, 0x43, struct kvm_memory_alias)
 #define KVM_SET_NR_MMU_PAGES      _IO(KVMIO,  0x44)
 #define KVM_GET_NR_MMU_PAGES      _IO(KVMIO,  0x45)
@@ -286,8 +286,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				int user_alloc);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
-gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -564,10 +562,6 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 }
 #endif
 
-#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
-#define unalias_gfn_instantiation unalias_gfn
-#endif
-
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 
 #define KVM_MAX_IRQ_ROUTES 1024
@@ -841,7 +841,7 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 	struct kvm_memslots *slots = kvm_memslots(kvm);
@@ -855,20 +855,13 @@ struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
-
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
-{
-	gfn = unalias_gfn(kvm, gfn);
-	return gfn_to_memslot_unaliased(kvm, gfn);
-}
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 
-	gfn = unalias_gfn_instantiation(kvm, gfn);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
 
@@ -913,7 +906,6 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	struct kvm_memory_slot *memslot = NULL;
 
-	gfn = unalias_gfn(kvm, gfn);
 	for (i = 0; i < slots->nmemslots; ++i) {
 		memslot = &slots->memslots[i];
 
@@ -934,8 +926,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
 
-	gfn = unalias_gfn_instantiation(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 	return gfn_to_hva_memslot(slot, gfn);
@@ -1202,8 +1193,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *memslot;
 
-	gfn = unalias_gfn(kvm, gfn);
-	memslot = gfn_to_memslot_unaliased(kvm, gfn);
+	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 