forked from Minki/linux
KVM: x86: Support KVM VMs sharing SEV context
Add a capability for userspace to mirror SEV encryption context from one vm to another. On our side, this is intended to support a Migration Helper vCPU, but it can also be used generically to support other in-guest workloads scheduled by the host. The intention is for the primary guest and the mirror to have nearly identical memslots. The primary benefits of this are that: 1) The VMs do not share KVM contexts (think APIC/MSRs/etc), so they can't accidentally clobber each other. 2) The VMs can have different memory-views, which is necessary for post-copy migration (the migration vCPUs on the target need to read and write to pages, when the primary guest would VMEXIT). This does not change the threat model for AMD SEV. Any memory involved is still owned by the primary guest and its initial state is still attested to through the normal SEV_LAUNCH_* flows. If userspace wanted to circumvent SEV, they could achieve the same effect by simply attaching a vCPU to the primary VM. This patch deliberately leaves userspace in charge of the memslots for the mirror, as it already has the power to mess with them in the primary guest. This patch does not support SEV-ES (much less SNP), as it does not handle handing off attested VMSAs to the mirror. For additional context, we need a Migration Helper because SEV PSP migration is far too slow for our live migration on its own. Using an in-guest migrator lets us speed this up significantly. Signed-off-by: Nathan Tempelman <natet@google.com> Message-Id: <20210408223214.2582277-1-natet@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
ee695f22b5
commit
54526d1fd5
@ -6207,6 +6207,22 @@ KVM_RUN_BUS_LOCK flag is used to distinguish between them.
|
|||||||
This capability can be used to check / enable 2nd DAWR feature provided
|
This capability can be used to check / enable 2nd DAWR feature provided
|
||||||
by POWER10 processor.
|
by POWER10 processor.
|
||||||
|
|
||||||
|
7.24 KVM_CAP_VM_COPY_ENC_CONTEXT_FROM
|
||||||
|
-------------------------------------
|
||||||
|
|
||||||
|
Architectures: x86 SEV enabled
|
||||||
|
Type: vm
|
||||||
|
Parameters: args[0] is the fd of the source vm
|
||||||
|
Returns: 0 on success; a negative error code on failure (EINVAL if the
capability is unsupported or either VM is in an invalid state, EBADF if
args[0] is not a KVM VM file descriptor)
|
||||||
|
|
||||||
|
This capability enables userspace to copy encryption context from the vm
|
||||||
|
indicated by the fd to the vm this is called on.
|
||||||
|
|
||||||
|
This is intended to support in-guest workloads scheduled by the host. This
|
||||||
|
allows the in-guest workload to maintain its own NPTs and keeps the two vms
|
||||||
|
from accidentally clobbering each other with interrupts and the like (separate
|
||||||
|
APIC/MSRs/etc).
|
||||||
|
|
||||||
7.25 KVM_CAP_SGX_ATTRIBUTE
|
7.25 KVM_CAP_SGX_ATTRIBUTE
|
||||||
--------------------------
|
--------------------------
|
||||||
|
|
||||||
@ -6749,4 +6765,4 @@ in the kernel based fast path. If they can not be handled by the kernel,
|
|||||||
they will get passed on to user space. So user space still has to have
|
they will get passed on to user space. So user space still has to have
|
||||||
an implementation for these despite the in kernel acceleration.
|
an implementation for these despite the in kernel acceleration.
|
||||||
|
|
||||||
This capability is always enabled.
|
This capability is always enabled.
|
||||||
|
@ -1349,6 +1349,7 @@ struct kvm_x86_ops {
|
|||||||
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
|
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
|
||||||
int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
|
int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
|
||||||
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
|
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
|
||||||
|
int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
|
||||||
|
|
||||||
int (*get_msr_feature)(struct kvm_msr_entry *entry);
|
int (*get_msr_feature)(struct kvm_msr_entry *entry);
|
||||||
|
|
||||||
|
@ -66,6 +66,11 @@ static int sev_flush_asids(void)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool is_mirroring_enc_context(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
|
||||||
|
}
|
||||||
|
|
||||||
/* Must be called with the sev_bitmap_lock held */
|
/* Must be called with the sev_bitmap_lock held */
|
||||||
static bool __sev_recycle_asids(int min_asid, int max_asid)
|
static bool __sev_recycle_asids(int min_asid, int max_asid)
|
||||||
{
|
{
|
||||||
@ -1122,6 +1127,12 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
|
|||||||
|
|
||||||
mutex_lock(&kvm->lock);
|
mutex_lock(&kvm->lock);
|
||||||
|
|
||||||
|
/* enc_context_owner handles all memory enc operations */
|
||||||
|
if (is_mirroring_enc_context(kvm)) {
|
||||||
|
r = -EINVAL;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
switch (sev_cmd.id) {
|
switch (sev_cmd.id) {
|
||||||
case KVM_SEV_ES_INIT:
|
case KVM_SEV_ES_INIT:
|
||||||
if (!sev_es) {
|
if (!sev_es) {
|
||||||
@ -1185,6 +1196,10 @@ int svm_register_enc_region(struct kvm *kvm,
|
|||||||
if (!sev_guest(kvm))
|
if (!sev_guest(kvm))
|
||||||
return -ENOTTY;
|
return -ENOTTY;
|
||||||
|
|
||||||
|
/* If kvm is mirroring encryption context it isn't responsible for it */
|
||||||
|
if (is_mirroring_enc_context(kvm))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
|
if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
@ -1251,6 +1266,10 @@ int svm_unregister_enc_region(struct kvm *kvm,
|
|||||||
struct enc_region *region;
|
struct enc_region *region;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
/* If kvm is mirroring encryption context it isn't responsible for it */
|
||||||
|
if (is_mirroring_enc_context(kvm))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
mutex_lock(&kvm->lock);
|
mutex_lock(&kvm->lock);
|
||||||
|
|
||||||
if (!sev_guest(kvm)) {
|
if (!sev_guest(kvm)) {
|
||||||
@ -1281,6 +1300,71 @@ failed:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
|
||||||
|
{
|
||||||
|
struct file *source_kvm_file;
|
||||||
|
struct kvm *source_kvm;
|
||||||
|
struct kvm_sev_info *mirror_sev;
|
||||||
|
unsigned int asid;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
source_kvm_file = fget(source_fd);
|
||||||
|
if (!file_is_kvm(source_kvm_file)) {
|
||||||
|
ret = -EBADF;
|
||||||
|
goto e_source_put;
|
||||||
|
}
|
||||||
|
|
||||||
|
source_kvm = source_kvm_file->private_data;
|
||||||
|
mutex_lock(&source_kvm->lock);
|
||||||
|
|
||||||
|
if (!sev_guest(source_kvm)) {
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto e_source_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Mirrors of mirrors should work, but let's not get silly */
|
||||||
|
if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto e_source_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
asid = to_kvm_svm(source_kvm)->sev_info.asid;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The mirror kvm holds an enc_context_owner ref so its asid can't
|
||||||
|
* disappear until we're done with it
|
||||||
|
*/
|
||||||
|
kvm_get_kvm(source_kvm);
|
||||||
|
|
||||||
|
fput(source_kvm_file);
|
||||||
|
mutex_unlock(&source_kvm->lock);
|
||||||
|
mutex_lock(&kvm->lock);
|
||||||
|
|
||||||
|
if (sev_guest(kvm)) {
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto e_mirror_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Set enc_context_owner and copy its encryption context over */
|
||||||
|
mirror_sev = &to_kvm_svm(kvm)->sev_info;
|
||||||
|
mirror_sev->enc_context_owner = source_kvm;
|
||||||
|
mirror_sev->asid = asid;
|
||||||
|
mirror_sev->active = true;
|
||||||
|
|
||||||
|
mutex_unlock(&kvm->lock);
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
e_mirror_unlock:
|
||||||
|
mutex_unlock(&kvm->lock);
|
||||||
|
kvm_put_kvm(source_kvm);
|
||||||
|
return ret;
|
||||||
|
e_source_unlock:
|
||||||
|
mutex_unlock(&source_kvm->lock);
|
||||||
|
e_source_put:
|
||||||
|
fput(source_kvm_file);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
void sev_vm_destroy(struct kvm *kvm)
|
void sev_vm_destroy(struct kvm *kvm)
|
||||||
{
|
{
|
||||||
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
|
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
|
||||||
@ -1290,6 +1374,12 @@ void sev_vm_destroy(struct kvm *kvm)
|
|||||||
if (!sev_guest(kvm))
|
if (!sev_guest(kvm))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
|
||||||
|
if (is_mirroring_enc_context(kvm)) {
|
||||||
|
kvm_put_kvm(sev->enc_context_owner);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
mutex_lock(&kvm->lock);
|
mutex_lock(&kvm->lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -4589,6 +4589,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
|
|||||||
.mem_enc_reg_region = svm_register_enc_region,
|
.mem_enc_reg_region = svm_register_enc_region,
|
||||||
.mem_enc_unreg_region = svm_unregister_enc_region,
|
.mem_enc_unreg_region = svm_unregister_enc_region,
|
||||||
|
|
||||||
|
.vm_copy_enc_context_from = svm_vm_copy_asid_from,
|
||||||
|
|
||||||
.can_emulate_instruction = svm_can_emulate_instruction,
|
.can_emulate_instruction = svm_can_emulate_instruction,
|
||||||
|
|
||||||
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
|
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
|
||||||
|
@ -68,6 +68,7 @@ struct kvm_sev_info {
|
|||||||
unsigned long pages_locked; /* Number of pages locked */
|
unsigned long pages_locked; /* Number of pages locked */
|
||||||
struct list_head regions_list; /* List of registered regions */
|
struct list_head regions_list; /* List of registered regions */
|
||||||
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
|
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
|
||||||
|
struct kvm *enc_context_owner; /* Owner of copied encryption context */
|
||||||
};
|
};
|
||||||
|
|
||||||
struct kvm_svm {
|
struct kvm_svm {
|
||||||
@ -580,6 +581,7 @@ int svm_register_enc_region(struct kvm *kvm,
|
|||||||
struct kvm_enc_region *range);
|
struct kvm_enc_region *range);
|
||||||
int svm_unregister_enc_region(struct kvm *kvm,
|
int svm_unregister_enc_region(struct kvm *kvm,
|
||||||
struct kvm_enc_region *range);
|
struct kvm_enc_region *range);
|
||||||
|
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
|
||||||
void pre_sev_run(struct vcpu_svm *svm, int cpu);
|
void pre_sev_run(struct vcpu_svm *svm, int cpu);
|
||||||
void __init sev_hardware_setup(void);
|
void __init sev_hardware_setup(void);
|
||||||
void sev_hardware_teardown(void);
|
void sev_hardware_teardown(void);
|
||||||
|
@ -3808,6 +3808,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||||||
#ifdef CONFIG_X86_SGX_KVM
|
#ifdef CONFIG_X86_SGX_KVM
|
||||||
case KVM_CAP_SGX_ATTRIBUTE:
|
case KVM_CAP_SGX_ATTRIBUTE:
|
||||||
#endif
|
#endif
|
||||||
|
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
|
||||||
r = 1;
|
r = 1;
|
||||||
break;
|
break;
|
||||||
case KVM_CAP_SET_GUEST_DEBUG2:
|
case KVM_CAP_SET_GUEST_DEBUG2:
|
||||||
@ -4714,7 +4715,6 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
|
|||||||
kvm_update_pv_runtime(vcpu);
|
kvm_update_pv_runtime(vcpu);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
@ -5413,6 +5413,11 @@ split_irqchip_unlock:
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
|
||||||
|
r = -EINVAL;
|
||||||
|
if (kvm_x86_ops.vm_copy_enc_context_from)
|
||||||
|
r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]);
|
||||||
|
return r;
|
||||||
default:
|
default:
|
||||||
r = -EINVAL;
|
r = -EINVAL;
|
||||||
break;
|
break;
|
||||||
|
@ -654,6 +654,7 @@ void kvm_exit(void);
|
|||||||
|
|
||||||
void kvm_get_kvm(struct kvm *kvm);
|
void kvm_get_kvm(struct kvm *kvm);
|
||||||
void kvm_put_kvm(struct kvm *kvm);
|
void kvm_put_kvm(struct kvm *kvm);
|
||||||
|
bool file_is_kvm(struct file *file);
|
||||||
void kvm_put_kvm_no_destroy(struct kvm *kvm);
|
void kvm_put_kvm_no_destroy(struct kvm *kvm);
|
||||||
|
|
||||||
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
|
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
|
||||||
|
@ -1080,6 +1080,7 @@ struct kvm_ppc_resize_hpt {
|
|||||||
#define KVM_CAP_PPC_DAWR1 194
|
#define KVM_CAP_PPC_DAWR1 194
|
||||||
#define KVM_CAP_SET_GUEST_DEBUG2 195
|
#define KVM_CAP_SET_GUEST_DEBUG2 195
|
||||||
#define KVM_CAP_SGX_ATTRIBUTE 196
|
#define KVM_CAP_SGX_ATTRIBUTE 196
|
||||||
|
#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
|
||||||
|
|
||||||
#ifdef KVM_CAP_IRQ_ROUTING
|
#ifdef KVM_CAP_IRQ_ROUTING
|
||||||
|
|
||||||
|
@ -4197,6 +4197,12 @@ static struct file_operations kvm_vm_fops = {
|
|||||||
KVM_COMPAT(kvm_vm_compat_ioctl),
|
KVM_COMPAT(kvm_vm_compat_ioctl),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
bool file_is_kvm(struct file *file)
|
||||||
|
{
|
||||||
|
return file && file->f_op == &kvm_vm_fops;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(file_is_kvm);
|
||||||
|
|
||||||
static int kvm_dev_ioctl_create_vm(unsigned long type)
|
static int kvm_dev_ioctl_create_vm(unsigned long type)
|
||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
|
Loading…
Reference in New Issue
Block a user