KVM: VMX: Fold __vmx_vcpu_run() back into vmx_vcpu_run()
...now that the code is no longer tagged with STACK_FRAME_NON_STANDARD. Arguably, providing __vmx_vcpu_run() to break up vmx_vcpu_run() is valuable on its own, but the previous split was purposely made as small as possible to limit the effects of STACK_FRAME_NON_STANDARD. In other words, the current split is now completely arbitrary and likely not the most logical. This also allows renaming ____vmx_vcpu_run() to __vmx_vcpu_run() in a future patch.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit c823dd5c0f
parent 5e0781df18
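For background on the reasoning in the commit message: STACK_FRAME_NON_STANDARD is the objtool annotation that the earlier split existed to confine to the smallest possible helper. The sketch below is not from this patch; it only illustrates, with a hypothetical function name, how such an annotation is applied to exempt a single function from objtool's stack validation (at the time of this commit the macro was provided by <linux/frame.h>).

#include <linux/frame.h>

/*
 * Hypothetical helper whose inline asm objtool cannot validate.  The
 * annotation below exempts just this one function from stack-frame
 * validation, which is why the old code kept the exempted region as
 * small as possible.
 */
static void hypothetical_unvalidated_helper(void)
{
        asm volatile("nop");
}
STACK_FRAME_NON_STANDARD(hypothetical_unvalidated_helper);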
@@ -6371,37 +6371,6 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }
 
-static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
-{
-	if (static_branch_unlikely(&vmx_l1d_should_flush))
-		vmx_l1d_flush(vcpu);
-
-	if (vcpu->arch.cr2 != read_cr2())
-		write_cr2(vcpu->arch.cr2);
-
-	asm(
-		"call ____vmx_vcpu_run \n\t"
-	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
-#ifdef CONFIG_X86_64
-		"=D"((int){0}), "=S"((int){0})
-	      : "D"(vmx), "S"(&vcpu->arch.regs),
-#else
-		"=a"((int){0}), "=d"((int){0})
-	      : "a"(vmx), "d"(&vcpu->arch.regs),
-#endif
-		"b"(vmx->loaded_vmcs->launched)
-	      : "cc", "memory"
-#ifdef CONFIG_X86_64
-		, "rax", "rcx", "rdx"
-		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-#else
-		, "ecx", "edi", "esi"
-#endif
-	);
-
-	vcpu->arch.cr2 = read_cr2();
-}
-
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6469,7 +6438,33 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
-	__vmx_vcpu_run(vcpu, vmx);
+	if (static_branch_unlikely(&vmx_l1d_should_flush))
+		vmx_l1d_flush(vcpu);
+
+	if (vcpu->arch.cr2 != read_cr2())
+		write_cr2(vcpu->arch.cr2);
+
+	asm(
+		"call ____vmx_vcpu_run \n\t"
+	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
+#ifdef CONFIG_X86_64
+		"=D"((int){0}), "=S"((int){0})
+	      : "D"(vmx), "S"(&vcpu->arch.regs),
+#else
+		"=a"((int){0}), "=d"((int){0})
+	      : "a"(vmx), "d"(&vcpu->arch.regs),
+#endif
+		"b"(vmx->loaded_vmcs->launched)
+	      : "cc", "memory"
+#ifdef CONFIG_X86_64
+		, "rax", "rcx", "rdx"
+		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+		, "ecx", "edi", "esi"
+#endif
+	);
+
+	vcpu->arch.cr2 = read_cr2();
 
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
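The asm() statement in the new hunk passes its arguments through explicit register constraints rather than a normal C call. As a rough illustration of that constraint style, here is a minimal user-space sketch (x86-64 only; an illustration, not kernel code, and asm_add() is hypothetical): "D" and "S" pin the inputs to RDI and RSI, much as the patch pins vmx and &vcpu->arch.regs before calling ____vmx_vcpu_run.

#include <stdio.h>

static long asm_add(long a, long b)
{
        long sum;

        /*
         * "D" and "S" force a and b into RDI and RSI before the asm
         * template runs; "=r" lets the compiler pick any register for
         * the result.  LEA computes a + b without touching the flags.
         */
        asm("lea (%1, %2), %0"
            : "=r"(sum)
            : "D"(a), "S"(b));

        return sum;
}

int main(void)
{
        printf("2 + 3 = %ld\n", asm_add(2, 3));
        return 0;
}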