KVM: PPC: Book3S HV: Invalidate TLB when nested vcpu moves physical cpu
This is only done at level 0, since only level 0 knows which physical CPU a vcpu is running on. This does for nested guests what L0 already did for its own guests, which is to flush the TLB on a pCPU when it goes to run a vCPU there, and there is another vCPU in the same VM which previously ran on this pCPU and has now started to run on another pCPU. This is to handle the situation where the other vCPU touched a mapping, moved to another pCPU and did a tlbiel (local-only tlbie) on that new pCPU and thus left behind a stale TLB entry on this pCPU.

This introduces a limit on the vcpu_token values used in the H_ENTER_NESTED hcall -- they must now be less than NR_CPUS.

[paulus@ozlabs.org - made prev_cpu array be short[] to reduce memory consumption.]

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
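To make the bookkeeping described above concrete, here is a minimal user-space sketch. All names here (nested_guest, note_vcpu_run, flush_guest_tlb_on_cpu, need_tlb_flush) are illustrative only, not the kernel's; the real code lives in arch/powerpc/kvm/book3s_hv*.c, operates on struct kvm_nested_guest, and additionally has to deal with SMT thread siblings and with flushing by the guest's shadow LPID. The idea is simply: a per-guest prev_cpu array indexed by vcpu_token records where each nested vCPU last ran, and when a vCPU migrates, its old pCPU is marked as needing a TLB flush before that guest runs there again.

/*
 * Minimal sketch of the prev_cpu / flush-on-migration bookkeeping.
 * Names are hypothetical; printf stands in for the actual TLB flush.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 8			/* assumption: small CPU count for the demo */

struct nested_guest {
	short prev_cpu[NR_CPUS];	/* last pCPU each nested vCPU ran on, by vcpu_token */
	unsigned char need_tlb_flush[NR_CPUS];	/* pCPUs that may hold stale entries */
};

static void guest_init(struct nested_guest *gp)
{
	/* -1 in every byte reads back as -1 in every short: "never ran anywhere" */
	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
	memset(gp->need_tlb_flush, 0, sizeof(gp->need_tlb_flush));
}

/* Stand-in for flushing this guest's shadow-LPID TLB entries on the local pCPU. */
static void flush_guest_tlb_on_cpu(int pcpu)
{
	printf("  flush TLB for guest on pcpu %d\n", pcpu);
}

/* Conceptually called in L0 just before running vcpu_token on pcpu. */
static void note_vcpu_run(struct nested_guest *gp, int vcpu_token, int pcpu)
{
	/* A vCPU that ran here earlier may have moved and used tlbiel elsewhere. */
	if (gp->need_tlb_flush[pcpu]) {
		flush_guest_tlb_on_cpu(pcpu);
		gp->need_tlb_flush[pcpu] = 0;
	}
	/* Record the migration so the old pCPU gets flushed before reuse. */
	if (gp->prev_cpu[vcpu_token] != pcpu) {
		if (gp->prev_cpu[vcpu_token] >= 0)
			gp->need_tlb_flush[gp->prev_cpu[vcpu_token]] = 1;
		gp->prev_cpu[vcpu_token] = pcpu;
	}
}

int main(void)
{
	struct nested_guest g;

	guest_init(&g);
	note_vcpu_run(&g, 0, 2);	/* vcpu 0 runs on pcpu 2 */
	note_vcpu_run(&g, 0, 5);	/* vcpu 0 migrates to pcpu 5 */
	note_vcpu_run(&g, 1, 2);	/* vcpu 1 lands on pcpu 2 -> flush happens here */
	return 0;
}

Because prev_cpu is indexed by vcpu_token, the token must be a valid index, which is exactly why the patch rejects H_ENTER_NESTED calls with vcpu_token >= NR_CPUS.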
commit 9d0b048da7
parent 690ed4cad8
committed by Michael Ellerman
@@ -168,6 +168,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	if (err)
 		return H_PARAMETER;
 
+	if (l2_hv.vcpu_token >= NR_CPUS)
+		return H_PARAMETER;
+
 	/* translate lpid */
 	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
 	if (!l2)
@@ -412,6 +415,8 @@ struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
 		goto out_free2;
 	gp->shadow_lpid = shadow_lpid;
 
+	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
+
 	return gp;
 
 out_free2:
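A note on the memset(-1) in the second hunk: prev_cpu was made a short[] to reduce memory consumption, and memset fills every byte with 0xff, so each 16-bit element reads back as -1, the "never ran on any pCPU" sentinel. A standalone check (not kernel code) of that byte-fill behaviour:

#include <assert.h>
#include <string.h>

int main(void)
{
	short prev_cpu[4];

	memset(prev_cpu, -1, sizeof(prev_cpu));	/* every byte becomes 0xff */
	for (int i = 0; i < 4; i++)
		assert(prev_cpu[i] == -1);	/* 0xffff read as short is -1 */
	return 0;
}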