Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:

 - s390: nested virt fixes (new 4.8 feature)
 - x86: fixes for 4.8 regressions
 - ARM: two small bugfixes

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm-arm: Unmap shadow pagetables properly
  x86, clock: Fix kvm guest tsc initialization
  arm: KVM: Fix idmap overlap detection when the kernel is idmap'ed
  KVM: lapic: adjust preemption timer correctly when goes TSC backward
  KVM: s390: vsie: fix riccbd
  KVM: s390: don't use current->thread.fpu.* when accessing registers
commit ac059c4fa7
@@ -158,8 +158,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
-	kvm_free_stage2_pgd(kvm);
-
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
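(This hunk pairs with the kvm_arch_flush_shadow_all() hunk further down: the kvm_free_stage2_pgd() call dropped here is re-added there; see the note after that hunk.)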
@@ -1714,7 +1714,8 @@ int kvm_mmu_init(void)
 		 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
 	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
-	    hyp_idmap_start < kern_hyp_va(~0UL)) {
+	    hyp_idmap_start < kern_hyp_va(~0UL) &&
+	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
 		/*
 		 * The idmap page is intersecting with the VA space,
 		 * it is not safe to continue further.
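The extra clause handles the case the subject line names: when the kernel itself runs idmap'ed, hyp_idmap_start necessarily equals __hyp_idmap_text_start, so the detected "overlap" is just the idmap page itself and is harmless. Below is a compilable sketch of the check; PAGE_OFFSET, the VA mask, and the addresses are made-up stand-ins for the real linker symbols and the kern_hyp_va() masking:

#include <stdio.h>

/* Hypothetical values standing in for the kernel symbols used by the
 * check in kvm_mmu_init(). */
#define PAGE_OFFSET 0xc0000000UL
#define HYP_VA_MASK 0x7fffffffUL /* assumed mask applied by kern_hyp_va() */

static unsigned long kern_hyp_va(unsigned long va)
{
	/* HYP addresses are kernel VAs with the top bit(s) masked off. */
	return va & HYP_VA_MASK;
}

/* Returns 1 when the idmap page collides with the HYP VA range in a way
 * that is actually fatal.  The new third clause tolerates the kernel
 * being idmap'ed, i.e. the idmap page *is* the __hyp_idmap_text page. */
static int idmap_collides(unsigned long hyp_idmap_start,
			  unsigned long hyp_idmap_text_start)
{
	return hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	       hyp_idmap_start <  kern_hyp_va(~0UL) &&
	       hyp_idmap_start != hyp_idmap_text_start;
}

int main(void)
{
	/* Kernel idmap'ed: overlap is the idmap page itself -> benign. */
	printf("%d\n", idmap_collides(0x48000000UL, 0x48000000UL)); /* 0 */
	/* Genuine overlap with an unrelated page -> fatal. */
	printf("%d\n", idmap_collides(0x48000000UL, 0x40000000UL)); /* 1 */
	return 0;
}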
@@ -1893,6 +1894,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
+	kvm_free_stage2_pgd(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
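Taken together with the kvm_arch_destroy_vm() hunk above, this moves the stage-2 page-table teardown into kvm_arch_flush_shadow_all(). As the subject "kvm-arm: Unmap shadow pagetables properly" suggests, generic KVM invokes this hook while the VM's userspace address space is being torn down (the MMU-notifier release path), so freeing the stage-2 tables here, rather than waiting for kvm_arch_destroy_vm(), unmaps them at the right point in the VM's lifetime.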
@@ -2231,9 +2231,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 		return -EINVAL;
 	current->thread.fpu.fpc = fpu->fpc;
 	if (MACHINE_HAS_VX)
-		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+				 (freg_t *) fpu->fprs);
 	else
-		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	return 0;
 }
 
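This hunk and the kvm_arch_vcpu_ioctl_get_fpu() one below are two halves of the same fix: per the subject "KVM: s390: don't use current->thread.fpu.* when accessing registers", the guest's floating-point/vector registers live in the kvm_run synced-register area (vcpu->run->s.regs), and current->thread.fpu is only guaranteed to mirror them while the vcpu is loaded on this host task, so the ioctls now read and write the sync-regs copy directly.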
@@ -2242,9 +2243,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	/* make sure we have the latest values */
 	save_fpu_regs();
 	if (MACHINE_HAS_VX)
-		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+		convert_vx_to_fp((freg_t *) fpu->fprs,
+				 (__vector128 *) vcpu->run->s.regs.vrs);
 	else
-		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
 	fpu->fpc = current->thread.fpu.fpc;
 	return 0;
 }
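On s390, floating-point register n overlays the leftmost 64 bits of vector register n, which is the layout the convert_fp_to_vx()/convert_vx_to_fp() helpers shuffle between. A self-contained sketch of that layout; the type definitions and main() are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the s390 types: an FPR is 64 bits wide and
 * overlays the leftmost (most significant) 64 bits of the corresponding
 * 128-bit vector register. */
typedef uint64_t freg_t;
typedef struct { uint64_t high, low; } vector128;

/* Sketch of what convert_fp_to_vx() does: scatter the 16 FP registers
 * into the high halves of vector registers 0-15. */
static void fp_to_vx(vector128 vxrs[16], const freg_t fprs[16])
{
	for (int i = 0; i < 16; i++)
		vxrs[i].high = fprs[i];
}

/* Sketch of convert_vx_to_fp(): gather the high halves back out. */
static void vx_to_fp(freg_t fprs[16], const vector128 vxrs[16])
{
	for (int i = 0; i < 16; i++)
		fprs[i] = vxrs[i].high;
}

int main(void)
{
	freg_t fprs[16] = { 0x4014000000000000ULL /* 5.0 */ };
	vector128 vxrs[16] = { { 0, 0 } };
	freg_t out[16] = { 0 };

	fp_to_vx(vxrs, fprs);
	vx_to_fp(out, vxrs);
	printf("round trip ok: %d\n", out[0] == fprs[0]);
	return 0;
}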
@@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		/* Validity 0x0044 will be checked by SIE */
 		if (rc)
 			goto unpin;
-		scb_s->gvrd = hpa;
+		scb_s->riccbd = hpa;
 	}
 	return 0;
 unpin:
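This looks like a one-word copy-paste fix in the nested-SIE block pinning: the host address of the pinned runtime-instrumentation control block was being stored into scb_s->gvrd, the designation field used by the neighbouring vector-register branch, instead of scb_s->riccbd, presumably leaving the shadow SIE control block without a valid riccbd.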
@@ -289,6 +289,7 @@ void __init kvmclock_init(void)
 	put_cpu();
 
 	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
+	x86_platform.calibrate_cpu = kvm_get_tsc_khz;
 	x86_platform.get_wallclock = kvm_get_wallclock;
 	x86_platform.set_wallclock = kvm_set_wallclock;
 #ifdef CONFIG_X86_LOCAL_APIC
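The one-liner makes a KVM guest take its CPU-frequency calibration from the same paravirtual source that already supplies its TSC frequency, instead of deriving cpu_khz from native calibration while tsc_khz comes from the hypervisor. A toy model of the idea; the struct, function names, and frequencies below are illustrative, not the kernel's x86_platform API:

#include <stdio.h>

/* Toy model: the platform layer asks two separate hooks for the TSC and
 * CPU frequencies.  Before the fix, a KVM guest only overrode the TSC
 * hook, so the two values could disagree. */
struct platform_ops {
	unsigned long (*calibrate_tsc)(void);
	unsigned long (*calibrate_cpu)(void);
};

static unsigned long kvm_get_tsc_khz(void)  { return 2496000; /* from the host */ }
static unsigned long native_calibrate(void) { return 2494897; /* noisy HW estimate */ }

int main(void)
{
	struct platform_ops ops = { native_calibrate, native_calibrate };

	ops.calibrate_tsc = kvm_get_tsc_khz;
	ops.calibrate_cpu = kvm_get_tsc_khz; /* the one-line fix */

	printf("tsc_khz=%lu cpu_khz=%lu\n",
	       ops.calibrate_tsc(), ops.calibrate_cpu());
	return 0;
}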
@@ -2743,16 +2743,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 
-		if (kvm_lapic_hv_timer_in_use(vcpu) &&
-				kvm_x86_ops->set_hv_timer(vcpu,
-					kvm_get_lapic_tscdeadline_msr(vcpu)))
-			kvm_lapic_switch_to_sw_timer(vcpu);
 		if (check_tsc_unstable()) {
 			u64 offset = kvm_compute_tsc_offset(vcpu,
 						vcpu->arch.last_guest_tsc);
 			kvm_x86_ops->write_tsc_offset(vcpu, offset);
 			vcpu->arch.tsc_catchup = 1;
 		}
+		if (kvm_lapic_hv_timer_in_use(vcpu) &&
+				kvm_x86_ops->set_hv_timer(vcpu,
+					kvm_get_lapic_tscdeadline_msr(vcpu)))
+			kvm_lapic_switch_to_sw_timer(vcpu);
 		/*
 		 * On a host with synchronized TSC, there is no need to update
 		 * kvmclock on vcpu->cpu migration
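The two blocks merely swap places, but the order matters: the APIC preemption timer ("hv timer") deadline is a guest-TSC value, so it must be re-armed only after the backwards-TSC compensation has written the corrected TSC offset, not before. A toy model of the ordering; all names and numbers below are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Toy model: guest_tsc = host_tsc + tsc_offset, so the host-side expiry
 * for a guest-TSC deadline depends on the *current* offset.  Arming the
 * timer before the offset is corrected bakes in a stale value. */
static uint64_t tsc_offset;
static uint64_t hv_timer_armed_against;

static void set_hv_timer(uint64_t guest_deadline)
{
	hv_timer_armed_against = guest_deadline - tsc_offset;
}

int main(void)
{
	uint64_t deadline = 1000000;

	/* Wrong order (before the fix): arm, then fix up the offset. */
	tsc_offset = 100;
	set_hv_timer(deadline);
	tsc_offset = 500; /* backwards-TSC compensation rewrites the offset */
	printf("stale host expiry: %llu\n",
	       (unsigned long long)hv_timer_armed_against);

	/* Right order (the fix): fix up the offset, then arm. */
	set_hv_timer(deadline);
	printf("fresh host expiry: %llu\n",
	       (unsigned long long)hv_timer_armed_against);
	return 0;
}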