mirror of https://github.com/torvalds/linux.git (synced 2024-12-26 12:52:30 +00:00)
Merge branch 'kvm-5.16-fixes' into kvm-master
* Fixes for Xen emulation
* Kill kvm_map_gfn() / kvm_unmap_gfn() and broken gfn_to_pfn_cache
* Fixes for migration of 32-bit nested guests on 64-bit hypervisor
* Compilation fixes
* More SEV cleanups
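For the gfn_to_pfn_cache removal listed above, the hunks below move callers onto the existing gfn_to_hva_cache helpers (kvm_gfn_to_hva_cache_init() plus kvm_read_guest_cached()/kvm_write_guest_cached()). A minimal caller-side sketch of that pattern follows; the helper name and its body are illustrative only, not part of the series:

#include <linux/kvm_host.h>

/*
 * Illustrative helper (not from the patch): read a guest structure through a
 * gfn_to_hva_cache, re-initializing the cache only when the GPA changes.
 */
static int demo_read_guest_blob(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				gpa_t gpa, void *dst, unsigned long len)
{
	int ret;

	/* (Re)build the GPA->HVA translation only if the cache is stale. */
	if (ghc->gpa != gpa) {
		ret = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
		if (ret)
			return ret;
	}

	/* Cached copy-in: no page pinning or kmap(), unlike kvm_vcpu_map(). */
	return kvm_read_guest_cached(kvm, ghc, dst, len);
}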
commit 817506df9d
@@ -363,6 +363,7 @@ union kvm_mmu_extended_role {
 		unsigned int cr4_smap:1;
 		unsigned int cr4_smep:1;
 		unsigned int cr4_la57:1;
+		unsigned int efer_lma:1;
 	};
 };
 
@@ -125,7 +125,7 @@ static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
 	}
 }
 
-struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
+static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
 {
 	u32 base = vcpu->arch.kvm_cpuid_base;
 
@@ -4682,6 +4682,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
 		/* PKEY and LA57 are active iff long mode is active. */
 		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
 		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+		ext.efer_lma = ____is_efer_lma(regs);
 	}
 
 	ext.valid = 1;
@@ -237,7 +237,6 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-	bool es_active = argp->id == KVM_SEV_ES_INIT;
 	int asid, ret;
 
 	if (kvm->created_vcpus)
@@ -247,7 +246,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	if (unlikely(sev->active))
 		return ret;
 
-	sev->es_active = es_active;
+	sev->active = true;
+	sev->es_active = argp->id == KVM_SEV_ES_INIT;
 	asid = sev_asid_new(sev);
 	if (asid < 0)
 		goto e_no_asid;
@@ -257,8 +257,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	if (ret)
 		goto e_free;
 
-	sev->active = true;
-	sev->asid = asid;
 	INIT_LIST_HEAD(&sev->regions_list);
 
 	return 0;
@@ -268,6 +266,7 @@ e_free:
 	sev->asid = 0;
 e_no_asid:
 	sev->es_active = false;
+	sev->active = false;
 	return ret;
 }
 
@@ -1530,7 +1529,7 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
 }
 
-static bool cmd_allowed_from_miror(u32 cmd_id)
+static bool is_cmd_allowed_from_mirror(u32 cmd_id)
 {
 	/*
 	 * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
@@ -1757,7 +1756,7 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 
 	/* Only the enc_context_owner handles some memory enc operations. */
 	if (is_mirroring_enc_context(kvm) &&
-	    !cmd_allowed_from_miror(sev_cmd.id)) {
+	    !is_cmd_allowed_from_mirror(sev_cmd.id)) {
 		r = -EINVAL;
 		goto out;
 	}
@@ -1990,7 +1989,12 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 	mutex_unlock(&source_kvm->lock);
 	mutex_lock(&kvm->lock);
 
-	if (sev_guest(kvm)) {
+	/*
+	 * Disallow out-of-band SEV/SEV-ES init if the target is already an
+	 * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
+	 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
+	 */
+	if (sev_guest(kvm) || kvm->created_vcpus) {
 		ret = -EINVAL;
 		goto e_mirror_unlock;
 	}
@@ -247,7 +247,7 @@ static __always_inline bool sev_es_guest(struct kvm *kvm)
 #ifdef CONFIG_KVM_AMD_SEV
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 
-	return sev_guest(kvm) && sev->es_active;
+	return sev->es_active && !WARN_ON_ONCE(!sev->active);
 #else
 	return false;
 #endif
@@ -670,33 +670,39 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
 				       struct vmcs12 *vmcs12)
 {
-	struct kvm_host_map map;
-	struct vmcs12 *shadow;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
 
 	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
 	    vmcs12->vmcs_link_pointer == INVALID_GPA)
 		return;
 
-	shadow = get_shadow_vmcs12(vcpu);
-
-	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
+	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
+	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
+				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
 		return;
 
-	memcpy(shadow, map.hva, VMCS12_SIZE);
-	kvm_vcpu_unmap(vcpu, &map, false);
+	kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
+			      VMCS12_SIZE);
 }
 
 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
 					      struct vmcs12 *vmcs12)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
 
 	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
 	    vmcs12->vmcs_link_pointer == INVALID_GPA)
 		return;
 
-	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
-			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
+	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
+	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
+				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
+		return;
+
+	kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
+			       VMCS12_SIZE);
 }
 
 /*
@@ -2830,6 +2836,17 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
+					       struct vmcs12 *vmcs12)
+{
+#ifdef CONFIG_X86_64
+	if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
+	       !!(vcpu->arch.efer & EFER_LMA)))
+		return -EINVAL;
+#endif
+	return 0;
+}
+
 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 				       struct vmcs12 *vmcs12)
 {
@@ -2854,18 +2871,16 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 #ifdef CONFIG_X86_64
-	ia32e = !!(vcpu->arch.efer & EFER_LMA);
+	ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
 #else
 	ia32e = false;
 #endif
 
 	if (ia32e) {
-		if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
-		    CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
+		if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
 			return -EINVAL;
 	} else {
-		if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
-		    CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
+		if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
 		    CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
 		    CC((vmcs12->host_rip) >> 32))
 			return -EINVAL;
@@ -2910,9 +2925,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
 					  struct vmcs12 *vmcs12)
 {
-	int r = 0;
-	struct vmcs12 *shadow;
-	struct kvm_host_map map;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
+	struct vmcs_hdr hdr;
 
 	if (vmcs12->vmcs_link_pointer == INVALID_GPA)
 		return 0;
@@ -2920,17 +2935,21 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
 	if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
 		return -EINVAL;
 
-	if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
+	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
+	    CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
+					 vmcs12->vmcs_link_pointer, VMCS12_SIZE)))
 		return -EINVAL;
 
-	shadow = map.hva;
+	if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
+					    offsetof(struct vmcs12, hdr),
+					    sizeof(hdr))))
+		return -EINVAL;
 
-	if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
-	    CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
-		r = -EINVAL;
+	if (CC(hdr.revision_id != VMCS12_REVISION) ||
+	    CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
+		return -EINVAL;
 
-	kvm_vcpu_unmap(vcpu, &map, false);
-	return r;
+	return 0;
 }
 
 /*
@@ -3535,6 +3554,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (nested_vmx_check_controls(vcpu, vmcs12))
 		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
+	if (nested_vmx_check_address_space_size(vcpu, vmcs12))
+		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+
 	if (nested_vmx_check_host_state(vcpu, vmcs12))
 		return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 
@@ -5264,10 +5286,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		return 1;
 
 	if (vmx->nested.current_vmptr != vmptr) {
-		struct kvm_host_map map;
-		struct vmcs12 *new_vmcs12;
+		struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
+		struct vmcs_hdr hdr;
 
-		if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
+		if (ghc->gpa != vmptr &&
+		    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
 			/*
 			 * Reads from an unbacked page return all 1s,
 			 * which means that the 32 bits located at the
@@ -5278,12 +5301,16 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
 		}
 
-		new_vmcs12 = map.hva;
+		if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
+						 offsetof(struct vmcs12, hdr),
+						 sizeof(hdr))) {
+			return nested_vmx_fail(vcpu,
+				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+		}
 
-		if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
-		    (new_vmcs12->hdr.shadow_vmcs &&
+		if (hdr.revision_id != VMCS12_REVISION ||
+		    (hdr.shadow_vmcs &&
 		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
-			kvm_vcpu_unmap(vcpu, &map, false);
 			return nested_vmx_fail(vcpu,
 				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
 		}
@@ -5294,8 +5321,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		 * Load VMCS12 from guest memory since it is not already
 		 * cached.
 		 */
-		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
-		kvm_vcpu_unmap(vcpu, &map, false);
+		if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
+					  VMCS12_SIZE)) {
+			return nested_vmx_fail(vcpu,
+				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+		}
 
 		set_current_vmptr(vmx, vmptr);
 	}
@@ -141,6 +141,16 @@ struct nested_vmx {
 	 */
 	struct vmcs12 *cached_shadow_vmcs12;
 
+	/*
+	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
+	 */
+	struct gfn_to_hva_cache shadow_vmcs12_cache;
+
+	/*
+	 * GPA to HVA cache for VMCS12
+	 */
+	struct gfn_to_hva_cache vmcs12_cache;
+
 	/*
 	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
 	 * with the data held by struct vmcs12.
@@ -3307,9 +3307,9 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 			     "xor %1, %1\n"
 			     "2:\n"
 			     _ASM_EXTABLE_UA(1b, 2b)
-			     : "+r" (st_preempted),
-			       "+&r" (err)
-			     : "m" (st->preempted));
+			     : "+q" (st_preempted),
+			       "+&r" (err),
+			       "+m" (st->preempted));
 		if (err)
 			goto out;
 
@@ -9547,12 +9547,16 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
 		return;
 
-	if (to_hv_vcpu(vcpu))
+	if (to_hv_vcpu(vcpu)) {
 		bitmap_or((ulong *)eoi_exit_bitmap,
 			  vcpu->arch.ioapic_handled_vectors,
 			  to_hv_synic(vcpu)->vec_bitmap, 256);
+		static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+		return;
+	}
 
-	static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+	static_call(kvm_x86_load_eoi_exitmap)(
+		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
 
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
@@ -127,9 +127,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	state_entry_time = vx->runstate_entry_time;
 	state_entry_time |= XEN_RUNSTATE_UPDATE;
 
-	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) !=
+	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(state_entry_time));
-	BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
+	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(state_entry_time));
 
 	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@@ -144,9 +144,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	 */
 	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
-	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) !=
+	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
-	BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
+	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
 
 	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@@ -163,9 +163,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
		     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
 	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
-	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
-		     sizeof(((struct compat_vcpu_runstate_info *)0)->time));
-	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
+	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
+		     sizeof_field(struct compat_vcpu_runstate_info, time));
+	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));
 
 	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@@ -205,9 +205,9 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
 	BUILD_BUG_ON(sizeof(rc) !=
-		     sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
+		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
 	BUILD_BUG_ON(sizeof(rc) !=
-		     sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));
+		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
 	/*
 	 * For efficiency, this mirrors the checks for using the valid
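The sizeof_field() conversions in the Xen hunks above are mechanical substitutions; the macro comes from include/linux/stddef.h and yields the size of a struct member without needing an object. Roughly, its kernel definition is:

/* Roughly the kernel's definition (include/linux/stddef.h): */
#define sizeof_field(TYPE, MEMBER)	sizeof((((TYPE *)0)->MEMBER))

So each replaced sizeof(((struct foo *)0)->bar) expression is equivalent; the macro form just reads better in the BUILD_BUG_ON() assertions.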
@@ -299,7 +299,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		break;
 
 	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
-		data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_gfn);
+		data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn;
 		r = 0;
 		break;
 
@@ -874,7 +874,7 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -950,12 +950,8 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -53,13 +53,6 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
-struct gfn_to_pfn_cache {
-	u64 generation;
-	gfn_t gfn;
-	kvm_pfn_t pfn;
-	bool dirty;
-};
-
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 /*
  * Memory caches are used to preallocate memory ahead of various MMU flows,
@@ -2548,72 +2548,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 {
 	if (pfn == 0)
 		return;
 
-	if (cache)
-		cache->pfn = cache->gfn = 0;
-
 	if (dirty)
 		kvm_release_pfn_dirty(pfn);
 	else
 		kvm_release_pfn_clean(pfn);
 }
 
-static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-				 struct gfn_to_pfn_cache *cache, u64 gen)
-{
-	kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
-	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-	cache->gfn = gfn;
-	cache->dirty = false;
-	cache->generation = gen;
-}
-
-static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map,
-			 struct gfn_to_pfn_cache *cache,
-			 bool atomic)
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
-	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	if (cache) {
-		if (!cache->pfn || cache->gfn != gfn ||
-			cache->generation != gen) {
-			if (atomic)
-				return -EAGAIN;
-			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-		}
-		pfn = cache->pfn;
-	} else {
-		if (atomic)
-			return -EAGAIN;
-		pfn = gfn_to_pfn_memslot(slot, gfn);
-	}
+	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		if (atomic)
-			hva = kmap_atomic(page);
-		else
-			hva = kmap(page);
+		hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else if (!atomic) {
-		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 	} else {
-		return -EINVAL;
+		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
 	}
 
@@ -2627,27 +2591,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 
 	return 0;
 }
-
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic)
-{
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-			cache, atomic);
-}
-EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-{
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-			NULL, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-static void __kvm_unmap_gfn(struct kvm *kvm,
-			struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map,
-			struct gfn_to_pfn_cache *cache,
-			bool dirty, bool atomic)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -2655,45 +2601,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE) {
-		if (atomic)
-			kunmap_atomic(map->hva);
-		else
-			kunmap(map->page);
-	}
+	if (map->page != KVM_UNMAPPED_PAGE)
+		kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
-	else if (!atomic)
-		memunmap(map->hva);
-	else
-		WARN_ONCE(1, "Unexpected unmapping in atomic context");
+	else
+		memunmap(map->hva);
 #endif
 
 	if (dirty)
-		mark_page_dirty_in_slot(kvm, memslot, map->gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-	if (cache)
-		cache->dirty |= dirty;
-	else
-		kvm_release_pfn(map->pfn, dirty, NULL);
+	kvm_release_pfn(map->pfn, dirty);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
-
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
-{
-	__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
-			cache, dirty, atomic);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-{
-	__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
-			map, NULL, dirty, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
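With the cache-aware variants removed above, kvm_vcpu_map()/kvm_vcpu_unmap() is the remaining short-term mapping interface. An illustrative caller, not taken from this series, that maps one guest page, writes it, and releases it dirty (the helper name is hypothetical):

#include <linux/kvm_host.h>
#include <linux/string.h>

/* Illustrative only: zero one guest page via the simplified map/unmap API. */
static int demo_zero_guest_page(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_host_map map;
	int ret;

	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map);
	if (ret)
		return ret;

	/* map.hva is a kernel mapping of the guest page while mapped. */
	memset(map.hva, 0, PAGE_SIZE);

	/* 'true' marks the page dirty before the pfn is released. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}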