KVM: SEV: Implement gmem hook for initializing private pages
This will handle the RMP table updates needed to put a page into a
private state before mapping it into an SEV-SNP guest.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-ID: <20240501085210.2213060-14-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e366f92ea9
commit 4f2e7aa1cf
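For orientation before the diff: the prepare path added here runs when guest_memfd hands a freshly allocated folio to an SEV-SNP guest. The sketch below models that call chain as a standalone userspace C program; every function is a local stand-in for the kernel symbol of the same name (suffixed _stub), and RMP lookup, 2M alignment, locking, and error handling are all elided, so treat it as a map of the flow rather than the implementation.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t kvm_pfn_t;
typedef uint64_t gfn_t;

/* Stand-in for rmp_make_private(): marks the PFN private in the RMP. */
static int rmp_make_private_stub(kvm_pfn_t pfn)
{
	printf("RMP: pfn 0x%llx -> private\n", (unsigned long long)pfn);
	return 0;
}

/* Stand-in for sev_gmem_prepare(): the vendor hook this patch adds. */
static int sev_gmem_prepare_stub(kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	(void)gfn;
	(void)max_order;
	return rmp_make_private_stub(pfn);
}

/* Stand-in for kvm_arch_gmem_prepare(): in the real kernel this
 * dispatches to the vendor hook via kvm_x86_ops.gmem_prepare. */
static int kvm_arch_gmem_prepare_stub(gfn_t gfn, kvm_pfn_t pfn, int max_order)
{
	return sev_gmem_prepare_stub(pfn, gfn, max_order);
}

int main(void)
{
	/* Stand-in for kvm_gmem_prepare_folio(): guest_memfd allocates a
	 * folio, then asks the arch to prepare it for private use. */
	return kvm_arch_gmem_prepare_stub(/* gfn */ 0x100, /* pfn */ 0x123456,
					  /* max_order */ 9);
}

The real dispatch from kvm_arch_gmem_prepare() to sev_gmem_prepare() goes through the kvm_x86_ops.gmem_prepare static call that this patch installs in svm_x86_ops.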
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -137,6 +137,7 @@ config KVM_AMD_SEV
 	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
 	select ARCH_HAS_CC_PLATFORM
 	select KVM_GENERIC_PRIVATE_MEM
+	select HAVE_KVM_GMEM_PREPARE
 	help
 	  Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
 	  with Encrypted State (SEV-ES) on AMD processors.
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4565,3 +4565,101 @@ out:
 out_no_trace:
 	put_page(pfn_to_page(pfn));
 }
+
+static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
+{
+	kvm_pfn_t pfn = start;
+
+	while (pfn < end) {
+		int ret, rmp_level;
+		bool assigned;
+
+		ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
+		if (ret) {
+			pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
+					    pfn, start, end, rmp_level, ret);
+			return false;
+		}
+
+		if (assigned) {
+			pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
+				 __func__, pfn, start, end, rmp_level);
+			return false;
+		}
+
+		pfn++;
+	}
+
+	return true;
+}
+
+static u8 max_level_for_order(int order)
+{
+	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+		return PG_LEVEL_2M;
+
+	return PG_LEVEL_4K;
+}
+
+static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
+{
+	kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+
+	/*
+	 * If this is a large folio, and the entire 2M range containing the
+	 * PFN is currently shared, then the entire 2M-aligned range can be
+	 * set to private via a single 2M RMP entry.
+	 */
+	if (max_level_for_order(order) > PG_LEVEL_4K &&
+	    is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
+		return true;
+
+	return false;
+}
+
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	kvm_pfn_t pfn_aligned;
+	gfn_t gfn_aligned;
+	int level, rc;
+	bool assigned;
+
+	if (!sev_snp_guest(kvm))
+		return 0;
+
+	rc = snp_lookup_rmpentry(pfn, &assigned, &level);
+	if (rc) {
+		pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
+				   gfn, pfn, rc);
+		return -ENOENT;
+	}
+
+	if (assigned) {
+		pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
+			 __func__, gfn, pfn, max_order, level);
+		return 0;
+	}
+
+	if (is_large_rmp_possible(kvm, pfn, max_order)) {
+		level = PG_LEVEL_2M;
+		pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+		gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
+	} else {
+		level = PG_LEVEL_4K;
+		pfn_aligned = pfn;
+		gfn_aligned = gfn;
+	}
+
+	rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
+	if (rc) {
+		pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
+				   gfn, pfn, level, rc);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
+		 __func__, gfn, pfn, pfn_aligned, max_order, level);
+
+	return 0;
+}
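A side note on the arithmetic above: PTRS_PER_PMD is 512 on x86-64, so ALIGN_DOWN(pfn, PTRS_PER_PMD) rounds a PFN down to the start of the 2M page containing it, and the candidate 2M RMP entry covers the following 512 PFNs. The minimal userspace illustration below is not part of the patch; PTRS_PER_PMD and ALIGN_DOWN are redefined locally with their x86-64 values/semantics.

#include <stdio.h>
#include <stdint.h>

#define PTRS_PER_PMD 512UL			/* 4K PFNs per 2M page on x86-64 */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))	/* a must be a power of two */

int main(void)
{
	uint64_t pfn = 0x123456;
	uint64_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);

	/* The candidate 2M RMP entry covers [pfn_aligned, pfn_aligned + 512). */
	printf("pfn 0x%llx falls in the 2M range [0x%llx, 0x%llx)\n",
	       (unsigned long long)pfn,
	       (unsigned long long)pfn_aligned,
	       (unsigned long long)(pfn_aligned + PTRS_PER_PMD));

	/* Mirrors max_level_for_order(): only folios of order >= 9
	 * (512 pages = 2M) can use a single 2M RMP entry. */
	for (int order = 0; order <= 10; order += 5)
		printf("order %2d -> %s\n", order, order >= 9 ? "2M" : "4K");

	return 0;
}

With pfn = 0x123456 this prints the range [0x123400, 0x123600), matching what rmp_make_private() would be handed for a 2M update.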
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5081,6 +5081,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
 	.alloc_apic_backing_page = svm_alloc_apic_backing_page,
+
+	.gmem_prepare = sev_gmem_prepare,
 };
 
 /*
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -736,6 +736,7 @@ extern unsigned int max_sev_asid;
 void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 #else
 static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
 	return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
@@ -752,6 +753,10 @@ static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXI
 static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
 static inline void sev_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
+static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+	return 0;
+}
 
 #endif
 
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13611,6 +13611,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
 #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
+bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
+{
+	return kvm->arch.vm_type == KVM_X86_SNP_VM;
+}
+
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
 {
 	return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -39,8 +39,8 @@ static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct fol
 		gfn = slot->base_gfn + index - slot->gmem.pgoff;
 		rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
 		if (rc) {
-			pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx, error %d.\n",
-					    index, rc);
+			pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
+					    index, gfn, pfn, rc);
 			return rc;
 		}
 	}