KVM fix for v4.14(-rc9)

Fix PPC HV host crash that can occur as a result of resizing the guest hashed page table.

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fix from Radim Krčmář:
 "Fix PPC HV host crash that can occur as a result of resizing the guest
  hashed page table"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Fix exclusion between HPT resizing and other HPT updates
commit 5cf2360ba6
Author: Linus Torvalds
Date:   2017-11-10 12:24:42 -08:00

2 changed files with 29 additions and 10 deletions
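
The fix makes HPT resizing and the other HPT users exclude each other through
kvm->arch.hpte_setup_done and kvm->arch.vcpus_running, as the hunks below show.
The following is a minimal, user-space C11 sketch of just that flag/counter
ordering pattern; hpt_ready, running_vcpus, begin_hpt_resize() and
try_enter_guest() are invented stand-ins, not the kernel's code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool hpt_ready = true;   /* stands in for hpte_setup_done */
static atomic_int  running_vcpus;      /* stands in for vcpus_running   */

/* Resizer side: forbid new guest entries, then look for active vcpus. */
static bool begin_hpt_resize(void)
{
	atomic_store(&hpt_ready, false);
	atomic_thread_fence(memory_order_seq_cst);  /* order the store above vs. the load below */
	if (atomic_load(&running_vcpus) != 0) {
		atomic_store(&hpt_ready, true);     /* a vcpu may be in the guest: back off */
		return false;
	}
	return true;            /* no vcpu can enter: safe to walk the table */
}

/* Vcpu side: announce ourselves, then re-check the flag before entering. */
static bool try_enter_guest(void)
{
	atomic_fetch_add(&running_vcpus, 1);
	atomic_thread_fence(memory_order_seq_cst);  /* order the store above vs. the load below */
	if (!atomic_load(&hpt_ready)) {
		atomic_fetch_sub(&running_vcpus, 1);
		return false;    /* resize in progress: do not enter the guest */
	}
	/* ... run the guest ... */
	atomic_fetch_sub(&running_vcpus, 1);
	return true;
}

int main(void)
{
	printf("enter guest: %d\n", try_enter_guest());
	printf("begin resize: %d\n", begin_hpt_resize());
	return 0;
}

With a full fence between each side's store and its subsequent load, the two
paths cannot both miss each other's update; this is the ordering that the
"Order vcpus_running vs. hpte_setup_done" comment in kvmppc_vcpu_run_hv()
below refers to.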

arch/powerpc/kvm/book3s_64_mmu_hv.c

@@ -646,6 +646,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
 		hnow_r = hpte_new_to_old_r(hnow_r);
 	}
+
+	/*
+	 * If the HPT is being resized, don't update the HPTE,
+	 * instead let the guest retry after the resize operation is complete.
+	 * The synchronization for hpte_setup_done test vs. set is provided
+	 * by the HPTE lock.
+	 */
+	if (!kvm->arch.hpte_setup_done)
+		goto out_unlock;
+
 	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
 	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
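
The check added above is made while the per-HPTE lock is held, and when a
resize is in flight the update is simply dropped so the guest retries the
access. Below is a minimal user-space sketch of that test-under-the-entry-lock
shape; struct entry, table_ready and update_entry() are invented stand-ins,
and an atomic flag is used only to keep the sketch self-contained and
race-free (in the kernel the HPTE lock provides the test-vs-set ordering, as
the comment notes).

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	pthread_mutex_t lock;       /* plays the role of the per-HPTE lock */
	unsigned long val;
};

static atomic_bool table_ready = true;   /* plays the role of hpte_setup_done */

/* Returns true if the update was applied, false if the caller should retry
 * later (the guest would simply take the fault again). */
static bool update_entry(struct entry *e, unsigned long new_val)
{
	bool applied = false;

	pthread_mutex_lock(&e->lock);
	if (atomic_load(&table_ready)) {    /* no resize in flight: safe to update */
		e->val = new_val;
		applied = true;
	}
	pthread_mutex_unlock(&e->lock);
	return applied;
}

int main(void)
{
	struct entry e = { .val = 0 };

	pthread_mutex_init(&e.lock, NULL);
	printf("applied: %d\n", update_entry(&e, 42));
	pthread_mutex_destroy(&e.lock);
	return 0;
}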

arch/powerpc/kvm/book3s_hv.c

@@ -2705,11 +2705,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 * Hard-disable interrupts, and check resched flag and signals.
 	 * If we need to reschedule or deliver a signal, clean up
 	 * and return without going into the guest(s).
+	 * If the hpte_setup_done flag has been cleared, don't go into the
+	 * guest because that means a HPT resize operation is in progress.
 	 */
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info)) {
+	    recheck_signals(&core_info) ||
+	    (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
@@ -3078,7 +3081,7 @@ out:
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded, i;
+	int n_ceded, i, r;
 	struct kvmppc_vcore *vc;
 	struct kvm_vcpu *v;
 
@@ -3132,6 +3135,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
 	       !signal_pending(current)) {
+		/* See if the HPT and VRMA are ready to go */
+		if (!kvm_is_radix(vcpu->kvm) &&
+		    !vcpu->kvm->arch.hpte_setup_done) {
+			spin_unlock(&vc->lock);
+			r = kvmppc_hv_setup_htab_rma(vcpu);
+			spin_lock(&vc->lock);
+			if (r) {
+				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+				vcpu->arch.ret = r;
+				break;
+			}
+		}
+
 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
 			kvmppc_vcore_end_preempt(vc);
 
@@ -3249,13 +3266,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up HTAB and VRMA */
-	if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
-		r = kvmppc_hv_setup_htab_rma(vcpu);
-		if (r)
-			goto out;
-	}
-
 	flush_all_to_thread(current);
 
 	/* Save userspace EBB and other register values */
@@ -3303,7 +3313,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 	mtspr(SPRN_VRSAVE, user_vrsave);
 
- out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
 	return r;
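
The remaining hunks move the one-time HTAB/VRMA setup out of
kvmppc_vcpu_run_hv() and into the kvmppc_run_vcpu() loop, so a vcpu that finds
hpte_setup_done clear re-runs kvmppc_hv_setup_htab_rma() before going into the
guest. The call is made with vc->lock (a spinlock) dropped and then retaken,
presumably because the setup path can block. Below is a minimal user-space
sketch of that drop-the-spinlock-around-a-blocking-setup, retake-and-fail-the-
entry-on-error shape; every name in it (vcore_lock, vm_lock, hpt_ready,
setup_hpt, run_vcpu_once) is an invented stand-in, not the kernel code.

#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t vcore_lock;   /* plays the role of vc->lock */
static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER; /* a sleeping lock */
static bool hpt_ready;                  /* hpte_setup_done stand-in */

/* May block on vm_lock, so it must not be called under the spinlock. */
static int setup_hpt(void)
{
	pthread_mutex_lock(&vm_lock);
	hpt_ready = true;               /* pretend the HPT/VRMA were set up */
	pthread_mutex_unlock(&vm_lock);
	return 0;
}

static int run_vcpu_once(void)
{
	int ret = 0;

	pthread_spin_lock(&vcore_lock);
	if (!hpt_ready) {
		pthread_spin_unlock(&vcore_lock);  /* cannot block under a spinlock */
		ret = setup_hpt();
		pthread_spin_lock(&vcore_lock);    /* retake before touching shared state */
		if (ret) {
			pthread_spin_unlock(&vcore_lock);
			return ret;    /* report a failed entry, as with KVM_EXIT_FAIL_ENTRY */
		}
	}
	/* ... enter the guest here ... */
	pthread_spin_unlock(&vcore_lock);
	return ret;
}

int main(void)
{
	pthread_spin_init(&vcore_lock, PTHREAD_PROCESS_PRIVATE);
	printf("run_vcpu_once() = %d\n", run_vcpu_once());
	pthread_spin_destroy(&vcore_lock);
	return 0;
}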