KVM: Add physical memory aliasing feature
With this, we can specify that accesses to one physical memory range will be
remapped to another. This is useful for the vga window at 0xa0000 which is
used as a movable window into the (much larger) framebuffer.

Signed-off-by: Avi Kivity <avi@qumranet.com>
commit e8207547d2
parent 954bbbc236
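Usage note (not part of the patch): a minimal userspace sketch of how the new ioctl might be driven, assuming a VM fd already obtained from /dev/kvm via KVM_CREATE_VM and a framebuffer region registered beforehand with KVM_SET_MEMORY_REGION; the helper name, the fb_base parameter, and the 64KB window size are illustrative only.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Point the legacy VGA window at 0xa0000 into the framebuffer at fb_base.
 * Addresses and size must be page aligned, and the alias slot index lives
 * in its own namespace, separate from memory slots. */
static int set_vga_alias(int vm_fd, __u64 fb_base)
{
	struct kvm_memory_alias alias;

	memset(&alias, 0, sizeof alias);
	alias.slot = 0;
	alias.guest_phys_addr = 0xa0000;  /* VGA window */
	alias.memory_size = 0x10000;      /* 64KB, illustrative */
	alias.target_phys_addr = fb_base; /* where the window currently points */

	return ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &alias);
}

Moving the window is just another KVM_SET_MEMORY_ALIAS call with a different target_phys_addr; on the kernel side, unalias_gfn() rewrites guest frame numbers and the shadow MMU is zapped so stale translations for the old target are dropped.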
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -51,6 +51,7 @@
 #define UNMAPPED_GVA (~(gpa_t)0)
 
 #define KVM_MAX_VCPUS 1
+#define KVM_ALIAS_SLOTS 4
 #define KVM_MEMORY_SLOTS 4
 #define KVM_NUM_MMU_PAGES 256
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -312,6 +313,12 @@ struct kvm_vcpu {
 	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
 };
 
+struct kvm_mem_alias {
+	gfn_t base_gfn;
+	unsigned long npages;
+	gfn_t target_gfn;
+};
+
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
@@ -322,6 +329,8 @@ struct kvm_memory_slot {
 
 struct kvm {
 	spinlock_t lock; /* protects everything except vcpus */
+	int naliases;
+	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
 	/*
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -846,7 +846,73 @@ out:
 	return r;
 }
 
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+/*
+ * Set a new alias region.  Aliases map a portion of physical memory into
+ * another portion.  This is useful for memory windows, for example the PC
+ * VGA region.
+ */
+static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
+					 struct kvm_memory_alias *alias)
+{
+	int r, n;
+	struct kvm_mem_alias *p;
+
+	r = -EINVAL;
+	/* General sanity checks */
+	if (alias->memory_size & (PAGE_SIZE - 1))
+		goto out;
+	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
+		goto out;
+	if (alias->slot >= KVM_ALIAS_SLOTS)
+		goto out;
+	if (alias->guest_phys_addr + alias->memory_size
+	    < alias->guest_phys_addr)
+		goto out;
+	if (alias->target_phys_addr + alias->memory_size
+	    < alias->target_phys_addr)
+		goto out;
+
+	spin_lock(&kvm->lock);
+
+	p = &kvm->aliases[alias->slot];
+	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
+	p->npages = alias->memory_size >> PAGE_SHIFT;
+	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+
+	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
+		if (kvm->aliases[n - 1].npages)
+			break;
+	kvm->naliases = n;
+
+	spin_unlock(&kvm->lock);
+
+	vcpu_load(&kvm->vcpus[0]);
+	spin_lock(&kvm->lock);
+	kvm_mmu_zap_all(&kvm->vcpus[0]);
+	spin_unlock(&kvm->lock);
+	vcpu_put(&kvm->vcpus[0]);
+
+	return 0;
+
+out:
+	return r;
+}
+
+static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+	int i;
+	struct kvm_mem_alias *alias;
+
+	for (i = 0; i < kvm->naliases; ++i) {
+		alias = &kvm->aliases[i];
+		if (gfn >= alias->base_gfn
+		    && gfn < alias->base_gfn + alias->npages)
+			return alias->target_gfn + gfn - alias->base_gfn;
+	}
+	return gfn;
+}
+
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 
@@ -859,13 +925,19 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(gfn_to_memslot);
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+	gfn = unalias_gfn(kvm, gfn);
+	return __gfn_to_memslot(kvm, gfn);
+}
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
 
-	slot = gfn_to_memslot(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn);
+	slot = __gfn_to_memslot(kvm, gfn);
 	if (!slot)
 		return NULL;
 	return slot->phys_mem[gfn - slot->base_gfn];
@@ -2512,6 +2584,17 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 		break;
 	}
+	case KVM_SET_MEMORY_ALIAS: {
+		struct kvm_memory_alias alias;
+
+		r = -EFAULT;
+		if (copy_from_user(&alias, argp, sizeof alias))
+			goto out;
+		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
+		if (r)
+			goto out;
+		break;
+	}
 	default:
 		;
 	}
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -11,7 +11,7 @@
 #include <asm/types.h>
 #include <linux/ioctl.h>
 
-#define KVM_API_VERSION 9
+#define KVM_API_VERSION 10
 
 /*
  * Architectural interrupt line count, and the size of the bitmap needed
@@ -33,6 +33,13 @@ struct kvm_memory_region {
 /* for kvm_memory_region::flags */
 #define KVM_MEM_LOG_DIRTY_PAGES 1UL
 
+struct kvm_memory_alias {
+	__u32 slot;  /* this has a different namespace than memory slots */
+	__u32 flags;
+	__u64 guest_phys_addr;
+	__u64 memory_size;
+	__u64 target_phys_addr;
+};
+
 enum kvm_exit_reason {
 	KVM_EXIT_UNKNOWN = 0,
@@ -261,6 +268,7 @@ struct kvm_signal_mask {
  */
 #define KVM_CREATE_VCPU           _IO(KVMIO,  0x41)
 #define KVM_GET_DIRTY_LOG         _IOW(KVMIO, 0x42, struct kvm_dirty_log)
+#define KVM_SET_MEMORY_ALIAS      _IOW(KVMIO, 0x43, struct kvm_memory_alias)
 
 /*
  * ioctls for vcpu fds