x86/xen: map foreign pfns for autotranslated guests
When running as a dom0 in PVH mode, foreign pfns that are accessed must be added to our p2m, which is managed by Xen. This is done via the XENMEM_add_to_physmap_range hypercall. It is needed by the toolstack when building guests and mapping guest memory, by xentrace when mapping Xen pages, etc.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
commit 77945ca73e
parent 1a4b50f674
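For context (not part of this patch): the path the commit message refers to starts in user space. Below is a minimal, hypothetical sketch assuming the libxc call xc_map_foreign_range() from xenctrl.h as the toolstack entry point; on a PVH dom0 the privcmd mmap it triggers is what ends up in the xen_remap_domain_mfn_range() path changed below. The function name and error handling here are illustrative only.

/*
 * Illustrative only -- assumes libxc (xenctrl.h) is available and that
 * the privcmd device routes the mapping into the kernel code below.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <xenctrl.h>

int read_foreign_page(uint32_t domid, unsigned long gfn)
{
	xc_interface *xch = xc_interface_open(NULL, NULL, 0);
	void *va;

	if (!xch)
		return -1;

	/* Map one page of the foreign domain read-only into our space. */
	va = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE, PROT_READ, gfn);
	if (va) {
		printf("first byte of gfn %#lx: %#x\n", gfn,
		       *(unsigned char *)va);
		munmap(va, XC_PAGE_SIZE);
	}

	xc_interface_close(xch);
	return va ? 0 : -1;
}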
@@ -2510,6 +2510,95 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
+#ifdef CONFIG_XEN_PVH
+/*
+ * Map a foreign gfn (fgfn) to a local pfn (lpfn). This is for user
+ * space on a PVH dom0 creating a new guest and needing to map domU pages.
+ */
+static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
+			    unsigned int domid)
+{
+	int rc, err = 0;
+	xen_pfn_t gpfn = lpfn;
+	xen_ulong_t idx = fgfn;
+
+	struct xen_add_to_physmap_range xatp = {
+		.domid = DOMID_SELF,
+		.foreign_domid = domid,
+		.size = 1,
+		.space = XENMAPSPACE_gmfn_foreign,
+	};
+	set_xen_guest_handle(xatp.idxs, &idx);
+	set_xen_guest_handle(xatp.gpfns, &gpfn);
+	set_xen_guest_handle(xatp.errs, &err);
+
+	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+	if (rc < 0)
+		return rc;
+	return err;
+}
+
+static int xlate_remove_from_p2m(unsigned long spfn, int count)
+{
+	struct xen_remove_from_physmap xrp;
+	int i, rc;
+
+	for (i = 0; i < count; i++) {
+		xrp.domid = DOMID_SELF;
+		xrp.gpfn = spfn + i;
+		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+struct xlate_remap_data {
+	unsigned long fgfn; /* foreign domain's gfn */
+	pgprot_t prot;
+	domid_t domid;
+	int index;
+	struct page **pages;
+};
+
+static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	int rc;
+	struct xlate_remap_data *remap = data;
+	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
+	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
+
+	rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
+	if (rc)
+		return rc;
+	native_set_pte(ptep, pteval);
+
+	return 0;
+}
+
+static int xlate_remap_gfn_range(struct vm_area_struct *vma,
+				 unsigned long addr, unsigned long mfn,
+				 int nr, pgprot_t prot, unsigned domid,
+				 struct page **pages)
+{
+	int err;
+	struct xlate_remap_data pvhdata;
+
+	BUG_ON(!pages);
+
+	pvhdata.fgfn = mfn;
+	pvhdata.prot = prot;
+	pvhdata.domid = domid;
+	pvhdata.index = 0;
+	pvhdata.pages = pages;
+	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+				  xlate_map_pte_fn, &pvhdata);
+	flush_tlb_all();
+	return err;
+}
+#endif
+
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
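A side note on the interface used by xlate_add_to_p2m() above: XENMEM_add_to_physmap_range takes arrays of indexes, gpfns and per-entry error codes, so several foreign gfns could in principle be added in one hypercall; the helper in this patch keeps .size = 1 and issues one call per page. A hedged sketch of what a batched variant could look like (the function name and its callers are hypothetical, not part of the patch):

/* Hypothetical batched variant -- illustrative only, not in this patch. */
static int xlate_add_batch_to_p2m(xen_pfn_t *gpfns, xen_ulong_t *fgfns,
				  int *errs, unsigned int count,
				  unsigned int domid)
{
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = count,
		.space = XENMAPSPACE_gmfn_foreign,
	};

	set_xen_guest_handle(xatp.idxs, fgfns);
	set_xen_guest_handle(xatp.gpfns, gpfns);
	set_xen_guest_handle(xatp.errs, errs);

	/*
	 * A negative return means the hypercall itself failed; per-page
	 * results are returned in errs[0..count-1].
	 */
	return HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
}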
@@ -2544,11 +2633,18 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	unsigned long range;
 	int err = 0;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -EINVAL;
-
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+#ifdef CONFIG_XEN_PVH
+		/* We need to update the local page tables and the xen HAP */
+		return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
+					     domid, pages);
+#else
+		return -EINVAL;
+#endif
+	}
+
 	rmd.mfn = mfn;
 	rmd.prot = prot;
 
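To see how a caller satisfies the BUG_ON() above, here is a hedged sketch of a minimal mmap handler in the style of the privcmd driver. The struct, names and ioctl setup (example_foreign_map, example_mmap) are hypothetical; they only illustrate the contract: the VMA must be VM_IO | VM_PFNMAP, and on PVH a pages array must be supplied so the new xlate path has local frames to back the mapping.

/* Hypothetical per-open state, filled in by an earlier setup ioctl. */
struct example_foreign_map {
	domid_t domid;		/* foreign domain id */
	xen_pfn_t first_gfn;	/* first foreign gfn to map */
	struct page **pages;	/* local backing pages, required on PVH */
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_foreign_map *m = file->private_data;
	int nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	/* The BUG_ON() in xen_remap_domain_mfn_range() requires these. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = m;	/* for the teardown hook below */

	/*
	 * On an auto-translated (PVH) dom0 this takes the new
	 * xlate_remap_gfn_range() path and updates both the p2m and the
	 * local page tables; on classic PV it uses the direct-MFN path.
	 */
	return xen_remap_domain_mfn_range(vma, vma->vm_start, m->first_gfn,
					  nr, vma->vm_page_prot,
					  m->domid, m->pages);
}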
@@ -2586,6 +2682,25 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
 		return 0;
 
+#ifdef CONFIG_XEN_PVH
+	while (numpgs--) {
+		/*
+		 * The mmu has already cleaned up the process mmu
+		 * resources at this point (lookup_address will return
+		 * NULL).
+		 */
+		unsigned long pfn = page_to_pfn(pages[numpgs]);
+
+		xlate_remove_from_p2m(pfn, 1);
+	}
+	/*
+	 * We don't need to flush tlbs because as part of
+	 * xlate_remove_from_p2m, the hypervisor will do tlb flushes
+	 * after removing the p2m entries from the EPT/NPT
+	 */
+	return 0;
+#else
 	return -EINVAL;
+#endif
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
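Continuing the hypothetical driver sketch above (and assuming its mmap handler also installed a vm_operations_struct with this .close hook), teardown is the mirror image: when the VMA goes away, the driver calls xen_unmap_domain_mfn_range() so that, on PVH, each foreign gfn is dropped from dom0's p2m via xlate_remove_from_p2m().

/* Hypothetical VMA close hook, pairing with example_mmap() above. */
static void example_vma_close(struct vm_area_struct *vma)
{
	struct example_foreign_map *m = vma->vm_private_data;
	int nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * The process page tables have already been torn down by now;
	 * this only removes the p2m entries (and returns immediately on
	 * non-autotranslated PV).
	 */
	xen_unmap_domain_mfn_range(vma, nr, m->pages);
}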