KVM: SEV: Add support for SEV-ES intra host migration

For SEV-ES to work with intra-host migration, the VMSAs, GHCB metadata,
and other SEV-ES info need to be preserved along with the guest's
memory.

Signed-off-by: Peter Gonda <pgonda@google.com>
Reviewed-by: Marc Orr <marcorr@google.com>
Cc: Marc Orr <marcorr@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Message-Id: <20211021174303.385706-4-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Peter Gonda 2021-10-21 10:43:01 -07:00 committed by Paolo Bonzini
parent b56639318b
commit 0b020f5af0

View File

@@ -1612,6 +1612,46 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
list_replace_init(&src->regions_list, &dst->regions_list); list_replace_init(&src->regions_list, &dst->regions_list);
} }
/*
 * Move all SEV-ES state (VMSA, GHCB metadata, etc.) from the source VM's
 * vCPUs to the matching vCPUs of the destination VM for intra-host
 * migration.  Returns 0 on success, -EINVAL if the VMs are incompatible.
 */
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
{
	struct vcpu_svm *dst_svm, *src_svm;
	struct kvm_vcpu *dst_vcpu, *src_vcpu;
	int i;

	/* Migration requires a 1:1 mapping between source and dest vCPUs. */
	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
		return -EINVAL;

	/*
	 * Validate every source vCPU up front so that the transfer loop
	 * below cannot fail partway through and leave state half-moved.
	 */
	kvm_for_each_vcpu(i, src_vcpu, src) {
		if (!src_vcpu->arch.guest_state_protected)
			return -EINVAL;
	}

	kvm_for_each_vcpu(i, src_vcpu, src) {
		dst_vcpu = kvm_get_vcpu(dst, i);
		src_svm = to_svm(src_vcpu);
		dst_svm = to_svm(dst_vcpu);

		/*
		 * Hand the VMSA and GHCB state over to the destination, then
		 * wipe the source's copies; ownership moves with the state.
		 */
		memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
		dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
		dst_vcpu->arch.guest_state_protected = true;

		memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
		src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
		src_vcpu->arch.guest_state_protected = false;
	}

	to_kvm_svm(src)->sev_info.es_active = false;
	to_kvm_svm(dst)->sev_info.es_active = true;

	return 0;
}
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
{ {
struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info; struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
@@ -1640,7 +1680,7 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
if (ret) if (ret)
goto out_fput; goto out_fput;
if (!sev_guest(source_kvm) || sev_es_guest(source_kvm)) { if (!sev_guest(source_kvm)) {
ret = -EINVAL; ret = -EINVAL;
goto out_source; goto out_source;
} }
@@ -1660,10 +1700,16 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
if (ret) if (ret)
goto out_dst_vcpu; goto out_dst_vcpu;
if (sev_es_guest(source_kvm)) {
ret = sev_es_migrate_from(kvm, source_kvm);
if (ret)
goto out_source_vcpu;
}
sev_migrate_from(dst_sev, src_sev); sev_migrate_from(dst_sev, src_sev);
kvm_vm_dead(source_kvm); kvm_vm_dead(source_kvm);
ret = 0; ret = 0;
out_source_vcpu:
sev_unlock_vcpus_for_migration(source_kvm); sev_unlock_vcpus_for_migration(source_kvm);
out_dst_vcpu: out_dst_vcpu:
sev_unlock_vcpus_for_migration(kvm); sev_unlock_vcpus_for_migration(kvm);