mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 05:11:48 +00:00
xen: Use correctly the Xen memory terminologies
Based on include/xen/mm.h [1], Linux is mistakenly using MFN when GFN is meant, I suspect this is because the first support for Xen was for PV. This resulted in some misimplementation of helpers on ARM and confused developers about the expected behavior. For instance, with pfn_to_mfn, we expect to get an MFN based on the name. Although, if we look at the implementation on x86, it's returning a GFN. For clarity and to avoid new confusion, replace any reference to mfn with gfn in any helpers used by PV drivers. The x86 code will still keep some references to pfn_to_mfn, which may be used by all kinds of guests. No changes have been made in the hypercall field, even though they may be invalid, in order to keep the same as the definition in the xen repo. Note that page_to_mfn has been renamed to xen_page_to_gfn to avoid a name too close to the KVM function gfn_to_page. Take also the opportunity to simplify simple constructions such as pfn_to_mfn(page_to_pfn(page)) into xen_page_to_gfn. More complex clean up will come in follow-up patches. [1] http://xenbits.xen.org/gitweb/?p=xen.git;a=commitdiff;h=e758ed14f390342513405dd766e874934573e6cb Signed-off-by: Julien Grall <julien.grall@citrix.com> Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com> Acked-by: Wei Liu <wei.liu2@citrix.com> Signed-off-by: David Vrabel <david.vrabel@citrix.com>
This commit is contained in:
parent
5192b35de4
commit
0df4f266b3
@ -34,14 +34,15 @@ typedef struct xpaddr {
|
||||
unsigned long __pfn_to_mfn(unsigned long pfn);
|
||||
extern struct rb_root phys_to_mach;
|
||||
|
||||
static inline unsigned long pfn_to_mfn(unsigned long pfn)
|
||||
/* Pseudo-physical <-> Guest conversion */
|
||||
static inline unsigned long pfn_to_gfn(unsigned long pfn)
|
||||
{
|
||||
return pfn;
|
||||
}
|
||||
|
||||
static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
||||
static inline unsigned long gfn_to_pfn(unsigned long gfn)
|
||||
{
|
||||
return mfn;
|
||||
return gfn;
|
||||
}
|
||||
|
||||
/* Pseudo-physical <-> BUS conversion */
|
||||
@ -65,9 +66,9 @@ static inline unsigned long bfn_to_pfn(unsigned long bfn)
|
||||
|
||||
#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
|
||||
|
||||
/* VIRT <-> MACHINE conversion */
|
||||
#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
|
||||
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
|
||||
/* VIRT <-> GUEST conversion */
|
||||
#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v)))
|
||||
#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << PAGE_SHIFT))
|
||||
|
||||
/* Only used in PV code. But ARM guests are always HVM. */
|
||||
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
|
||||
|
@ -101,6 +101,11 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
|
||||
{
|
||||
unsigned long mfn;
|
||||
|
||||
/*
|
||||
* Some x86 code are still using pfn_to_mfn instead of
|
||||
* pfn_to_mfn. This will have to be removed when we figured
|
||||
* out which call.
|
||||
*/
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return pfn;
|
||||
|
||||
@ -147,6 +152,11 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
||||
{
|
||||
unsigned long pfn;
|
||||
|
||||
/*
|
||||
* Some x86 code are still using mfn_to_pfn instead of
|
||||
* gfn_to_pfn. This will have to be removed when we figure
|
||||
* out which call.
|
||||
*/
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return mfn;
|
||||
|
||||
@ -176,9 +186,26 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
|
||||
return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
|
||||
}
|
||||
|
||||
/* Pseudo-physical <-> Guest conversion */
|
||||
static inline unsigned long pfn_to_gfn(unsigned long pfn)
|
||||
{
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return pfn;
|
||||
else
|
||||
return pfn_to_mfn(pfn);
|
||||
}
|
||||
|
||||
static inline unsigned long gfn_to_pfn(unsigned long gfn)
|
||||
{
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return gfn;
|
||||
else
|
||||
return mfn_to_pfn(gfn);
|
||||
}
|
||||
|
||||
/* Pseudo-physical <-> Bus conversion */
|
||||
#define pfn_to_bfn(pfn) pfn_to_mfn(pfn)
|
||||
#define bfn_to_pfn(bfn) mfn_to_pfn(bfn)
|
||||
#define pfn_to_bfn(pfn) pfn_to_gfn(pfn)
|
||||
#define bfn_to_pfn(bfn) gfn_to_pfn(bfn)
|
||||
|
||||
/*
|
||||
* We detect special mappings in one of two ways:
|
||||
@ -219,6 +246,10 @@ static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
|
||||
#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
|
||||
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
|
||||
|
||||
/* VIRT <-> GUEST conversion */
|
||||
#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v)))
|
||||
#define gfn_to_virt(g) (__va(gfn_to_pfn(g) << PAGE_SHIFT))
|
||||
|
||||
static inline unsigned long pte_mfn(pte_t pte)
|
||||
{
|
||||
return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
|
@ -453,7 +453,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
|
||||
}
|
||||
#endif
|
||||
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
|
||||
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
|
||||
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
|
||||
if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
|
||||
BUG();
|
||||
|
||||
|
@ -250,7 +250,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
|
||||
struct blkfront_info *info)
|
||||
{
|
||||
struct grant *gnt_list_entry;
|
||||
unsigned long buffer_mfn;
|
||||
unsigned long buffer_gfn;
|
||||
|
||||
BUG_ON(list_empty(&info->grants));
|
||||
gnt_list_entry = list_first_entry(&info->grants, struct grant,
|
||||
@ -269,10 +269,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
|
||||
BUG_ON(!pfn);
|
||||
gnt_list_entry->pfn = pfn;
|
||||
}
|
||||
buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
|
||||
buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn);
|
||||
gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
|
||||
info->xbdev->otherend_id,
|
||||
buffer_mfn, 0);
|
||||
buffer_gfn, 0);
|
||||
return gnt_list_entry;
|
||||
}
|
||||
|
||||
|
@ -232,7 +232,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
|
||||
struct xenbus_transaction xbt;
|
||||
|
||||
ret = gnttab_grant_foreign_access(dev->otherend_id,
|
||||
virt_to_mfn(info->page), 0);
|
||||
virt_to_gfn(info->page), 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
info->gref = ret;
|
||||
@ -255,7 +255,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
|
||||
goto error_irqh;
|
||||
}
|
||||
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
|
||||
virt_to_mfn(info->page));
|
||||
virt_to_gfn(info->page));
|
||||
if (ret)
|
||||
goto error_xenbus;
|
||||
ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
|
||||
|
@ -314,7 +314,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
|
||||
} else {
|
||||
copy_gop->source.domid = DOMID_SELF;
|
||||
copy_gop->source.u.gmfn =
|
||||
virt_to_mfn(page_address(page));
|
||||
virt_to_gfn(page_address(page));
|
||||
}
|
||||
copy_gop->source.offset = offset;
|
||||
|
||||
@ -1296,7 +1296,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
|
||||
queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
|
||||
|
||||
queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
|
||||
virt_to_mfn(skb->data);
|
||||
virt_to_gfn(skb->data);
|
||||
queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
|
||||
queue->tx_copy_ops[*copy_ops].dest.offset =
|
||||
offset_in_page(skb->data);
|
||||
|
@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
|
||||
struct sk_buff *skb;
|
||||
unsigned short id;
|
||||
grant_ref_t ref;
|
||||
unsigned long pfn;
|
||||
unsigned long gfn;
|
||||
struct xen_netif_rx_request *req;
|
||||
|
||||
skb = xennet_alloc_one_rx_buffer(queue);
|
||||
@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
|
||||
BUG_ON((signed short)ref < 0);
|
||||
queue->grant_rx_ref[id] = ref;
|
||||
|
||||
pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
|
||||
gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
|
||||
|
||||
req = RING_GET_REQUEST(&queue->rx, req_prod);
|
||||
gnttab_grant_foreign_access_ref(ref,
|
||||
queue->info->xbdev->otherend_id,
|
||||
pfn_to_mfn(pfn),
|
||||
gfn,
|
||||
0);
|
||||
|
||||
req->id = id;
|
||||
@ -430,8 +430,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
|
||||
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
|
||||
BUG_ON((signed short)ref < 0);
|
||||
|
||||
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
|
||||
page_to_mfn(page), GNTMAP_readonly);
|
||||
gnttab_grant_foreign_access_ref(ref,
|
||||
queue->info->xbdev->otherend_id,
|
||||
xen_page_to_gfn(page),
|
||||
GNTMAP_readonly);
|
||||
|
||||
queue->tx_skbs[id].skb = skb;
|
||||
queue->grant_tx_page[id] = page;
|
||||
|
@ -377,7 +377,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
|
||||
unsigned int data_len = scsi_bufflen(sc);
|
||||
unsigned int data_grants = 0, seg_grants = 0;
|
||||
struct scatterlist *sg;
|
||||
unsigned long mfn;
|
||||
struct scsiif_request_segment *seg;
|
||||
|
||||
ring_req->nr_segments = 0;
|
||||
@ -420,9 +419,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
|
||||
ref = gnttab_claim_grant_reference(&gref_head);
|
||||
BUG_ON(ref == -ENOSPC);
|
||||
|
||||
mfn = pfn_to_mfn(page_to_pfn(page));
|
||||
gnttab_grant_foreign_access_ref(ref,
|
||||
info->dev->otherend_id, mfn, 1);
|
||||
info->dev->otherend_id,
|
||||
xen_page_to_gfn(page), 1);
|
||||
shadow->gref[ref_cnt] = ref;
|
||||
ring_req->seg[ref_cnt].gref = ref;
|
||||
ring_req->seg[ref_cnt].offset = (uint16_t)off;
|
||||
@ -454,9 +453,10 @@ static int map_data_for_request(struct vscsifrnt_info *info,
|
||||
ref = gnttab_claim_grant_reference(&gref_head);
|
||||
BUG_ON(ref == -ENOSPC);
|
||||
|
||||
mfn = pfn_to_mfn(page_to_pfn(page));
|
||||
gnttab_grant_foreign_access_ref(ref,
|
||||
info->dev->otherend_id, mfn, grant_ro);
|
||||
info->dev->otherend_id,
|
||||
xen_page_to_gfn(page),
|
||||
grant_ro);
|
||||
|
||||
shadow->gref[ref_cnt] = ref;
|
||||
seg->gref = ref;
|
||||
|
@ -265,7 +265,8 @@ static int xen_pv_console_init(void)
|
||||
return 0;
|
||||
}
|
||||
info->evtchn = xen_start_info->console.domU.evtchn;
|
||||
info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
|
||||
/* GFN == MFN for PV guest */
|
||||
info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
|
||||
info->vtermno = HVC_COOKIE;
|
||||
|
||||
spin_lock(&xencons_lock);
|
||||
@ -390,7 +391,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
|
||||
if (IS_ERR(info->hvc))
|
||||
return PTR_ERR(info->hvc);
|
||||
if (xen_pv_domain())
|
||||
mfn = virt_to_mfn(info->intf);
|
||||
mfn = virt_to_gfn(info->intf);
|
||||
else
|
||||
mfn = __pa(info->intf) >> PAGE_SHIFT;
|
||||
ret = gnttab_alloc_grant_references(1, &gref_head);
|
||||
|
@ -539,7 +539,7 @@ static int xenfb_remove(struct xenbus_device *dev)
|
||||
|
||||
static unsigned long vmalloc_to_mfn(void *address)
|
||||
{
|
||||
return pfn_to_mfn(vmalloc_to_pfn(address));
|
||||
return pfn_to_gfn(vmalloc_to_pfn(address));
|
||||
}
|
||||
|
||||
static void xenfb_init_shared_page(struct xenfb_info *info,
|
||||
@ -586,7 +586,7 @@ static int xenfb_connect_backend(struct xenbus_device *dev,
|
||||
goto unbind_irq;
|
||||
}
|
||||
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
|
||||
virt_to_mfn(info->page));
|
||||
virt_to_gfn(info->page));
|
||||
if (ret)
|
||||
goto error_xenbus;
|
||||
ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
|
||||
|
@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
|
||||
/* Update direct mapping, invalidate P2M, and add to balloon. */
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
pfn = frame_list[i];
|
||||
frame_list[i] = pfn_to_mfn(pfn);
|
||||
frame_list[i] = pfn_to_gfn(pfn);
|
||||
page = pfn_to_page(pfn);
|
||||
|
||||
#ifdef CONFIG_XEN_HAVE_PVMMU
|
||||
|
@ -1688,7 +1688,7 @@ void __init xen_init_IRQ(void)
|
||||
struct physdev_pirq_eoi_gmfn eoi_gmfn;
|
||||
|
||||
pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
|
||||
eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
|
||||
eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
|
||||
rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
|
||||
/* TODO: No PVH support for PIRQ EOI */
|
||||
if (rc != 0) {
|
||||
|
@ -111,7 +111,7 @@ static int init_control_block(int cpu,
|
||||
for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
|
||||
q->head[i] = 0;
|
||||
|
||||
init_control.control_gfn = virt_to_mfn(control_block);
|
||||
init_control.control_gfn = virt_to_gfn(control_block);
|
||||
init_control.offset = 0;
|
||||
init_control.vcpu = cpu;
|
||||
|
||||
@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info)
|
||||
/* Mask all events in this page before adding it. */
|
||||
init_array_page(array_page);
|
||||
|
||||
expand_array.array_gfn = virt_to_mfn(array_page);
|
||||
expand_array.array_gfn = virt_to_gfn(array_page);
|
||||
|
||||
ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
|
||||
if (ret < 0)
|
||||
|
@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
|
||||
|
||||
/* Grant foreign access to the page. */
|
||||
rc = gnttab_grant_foreign_access(op->domid,
|
||||
pfn_to_mfn(page_to_pfn(gref->page)), readonly);
|
||||
xen_page_to_gfn(gref->page),
|
||||
readonly);
|
||||
if (rc < 0)
|
||||
goto undo;
|
||||
gref_ids[i] = gref->gref_id = rc;
|
||||
|
@ -80,7 +80,7 @@ static int xen_suspend(void *data)
|
||||
* is resuming in a new domain.
|
||||
*/
|
||||
si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
|
||||
? virt_to_mfn(xen_start_info)
|
||||
? virt_to_gfn(xen_start_info)
|
||||
: 0);
|
||||
|
||||
xen_arch_post_suspend(si->cancelled);
|
||||
|
@ -131,7 +131,7 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
|
||||
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
|
||||
u32 index, unsigned long pfn)
|
||||
{
|
||||
unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
|
||||
unsigned long gmfn = pfn_to_gfn(pfn);
|
||||
|
||||
return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
|
||||
gmfn, 0, 0, 0);
|
||||
@ -140,7 +140,7 @@ static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
|
||||
static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
|
||||
u32 index, unsigned long pfn)
|
||||
{
|
||||
unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
|
||||
unsigned long gmfn = pfn_to_gfn(pfn);
|
||||
|
||||
return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
|
||||
gmfn, 0, 0, 0);
|
||||
|
@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
err = gnttab_grant_foreign_access(dev->otherend_id,
|
||||
virt_to_mfn(vaddr), 0);
|
||||
virt_to_gfn(vaddr), 0);
|
||||
if (err < 0) {
|
||||
xenbus_dev_fatal(dev, err,
|
||||
"granting access to ring page");
|
||||
|
@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid)
|
||||
goto out_err;
|
||||
|
||||
gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
|
||||
virt_to_mfn(xen_store_interface), 0 /* writable */);
|
||||
virt_to_gfn(xen_store_interface), 0 /* writable */);
|
||||
|
||||
arg.dom = DOMID_SELF;
|
||||
arg.remote_dom = domid;
|
||||
|
@ -711,9 +711,7 @@ static int __init xenstored_local_init(void)
|
||||
if (!page)
|
||||
goto out_err;
|
||||
|
||||
xen_store_mfn = xen_start_info->store_mfn =
|
||||
pfn_to_mfn(virt_to_phys((void *)page) >>
|
||||
PAGE_SHIFT);
|
||||
xen_store_mfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
|
||||
|
||||
/* Next allocate a local port which xenstored can bind to */
|
||||
alloc_unbound.dom = DOMID_SELF;
|
||||
@ -787,12 +785,12 @@ static int __init xenbus_init(void)
|
||||
err = xenstored_local_init();
|
||||
if (err)
|
||||
goto out_error;
|
||||
xen_store_interface = mfn_to_virt(xen_store_mfn);
|
||||
xen_store_interface = gfn_to_virt(xen_store_mfn);
|
||||
break;
|
||||
case XS_PV:
|
||||
xen_store_evtchn = xen_start_info->store_evtchn;
|
||||
xen_store_mfn = xen_start_info->store_mfn;
|
||||
xen_store_interface = mfn_to_virt(xen_store_mfn);
|
||||
xen_store_interface = gfn_to_virt(xen_store_mfn);
|
||||
break;
|
||||
case XS_HVM:
|
||||
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
|
||||
|
@ -3,9 +3,9 @@
|
||||
|
||||
#include <asm/xen/page.h>
|
||||
|
||||
static inline unsigned long page_to_mfn(struct page *page)
|
||||
static inline unsigned long xen_page_to_gfn(struct page *page)
|
||||
{
|
||||
return pfn_to_mfn(page_to_pfn(page));
|
||||
return pfn_to_gfn(page_to_pfn(page));
|
||||
}
|
||||
|
||||
struct xen_memory_region {
|
||||
|
Loading…
Reference in New Issue
Block a user