#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

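/*
 * Illustrative sketch (not part of the original header): a minimal
 * round trip through the two wrappers above, passing a NULL dma_attrs
 * for default attributes. The real caller of these wrappers is
 * drivers/xen/swiotlb-xen.c; the function name below is hypothetical
 * and exists only for illustration.
 */
static inline int xen_coherent_roundtrip_example(struct device *hwdev)
{
	dma_addr_t handle;
	void *buf;

	/* Allocate one coherent page through the native dma_ops... */
	buf = xen_alloc_coherent_pages(hwdev, PAGE_SIZE, &handle,
				       GFP_KERNEL, NULL);
	if (!buf)
		return -ENOMEM;

	/* ...and release it through the matching wrapper. */
	xen_free_coherent_pages(hwdev, PAGE_SIZE, buf, handle, NULL);
	return 0;
}
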
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1 << compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it cannot contain a mix of local and foreign Xen pages:
	 * if the first xen_pfn == mfn the page is local, otherwise it is
	 * a foreign page grant-mapped in dom0. If the page is local we
	 * can safely call the native dma_ops function, otherwise we call
	 * the Xen-specific function.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

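/*
 * Illustrative sketch (not part of the original header): how a dom0
 * caller might drive the dispatcher above for a local page. Because
 * dom0 is mapped 1:1, the bus (machine) address of a local page equals
 * its physical address, so page_to_phys() suffices here. The function
 * name is hypothetical; the real user of xen_dma_map_page is
 * drivers/xen/swiotlb-xen.c.
 */
static inline dma_addr_t xen_map_local_page_example(struct device *hwdev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	/* For a local dom0 page, machine address == physical address. */
	dma_addr_t dev_addr = page_to_phys(page) + offset;

	/* Takes the "local" branch and calls the native map_page op. */
	xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, NULL);
	return dev_addr;
}
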
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it cannot contain a mix of local and foreign Xen pages.
	 * Since dom0 is mapped 1:1, calling pfn_valid on a foreign mfn
	 * always returns false. If the page is local we can safely call
	 * the native dma_ops function, otherwise we call the Xen-specific
	 * function.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

/*
 * Rationale for the __xen_dma_* helpers, from the commit
 * "xen/arm: reimplement xen_dma_unmap_page & friends":
 *
 * xen_dma_unmap_page, xen_dma_sync_single_for_cpu and
 * xen_dma_sync_single_for_device used to be implemented by calling into
 * the corresponding generic ARM implementations. That required first
 * translating the dma_addr_t handle, which on Xen is a machine address,
 * into a physical address. The translation is expensive and inaccurate:
 * a single machine address can correspond to multiple physical addresses
 * in one domain, because the same page can be granted multiple times by
 * the frontend.
 *
 * To avoid this problem, the Xen-specific implementations operate on
 * machine addresses directly. They rely on the fact that the hypervisor
 * creates a second p2m mapping of any grant page for dom0 at physical
 * address == machine address, so memory can be accessed, and caches
 * flushed, at physical address == dma_addr_t handle. Some cache
 * maintenance operations require a virtual address; instead of using
 * ioremap_cache, which is not safe in interrupt context, a per-cpu
 * PAGE_KERNEL scratch page is allocated and its pte updated manually.
 * arm64 doesn't need cache maintenance operations on unmap for now.
 *
 * Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
 * Tested-by: Denis Schneider <v1ne2go@gmail.com>
 */

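/*
 * A minimal sketch of the per-cpu scratch-page technique described
 * above, loosely modeled on arch/arm/xen/mm32.c of that era. The
 * xen_scratch_* names are hypothetical and the details (pte helper,
 * per-cpu accessors) may differ from the real implementation:
 *
 *	static DEFINE_PER_CPU(unsigned long, xen_scratch_virt);
 *	static DEFINE_PER_CPU(pte_t *, xen_scratch_ptep);
 *
 *	static void *xen_scratch_remap(dma_addr_t handle)
 *	{
 *		// get_cpu_var disables preemption, keeping the
 *		// mapping private to this cpu until it is released.
 *		unsigned long virt = get_cpu_var(xen_scratch_virt);
 *		pte_t *ptep = __get_cpu_var(xen_scratch_ptep);
 *
 *		// Point the reserved kernel pte at the machine frame
 *		// behind the handle, flush the stale TLB entry, and
 *		// use the fixed virtual address for cache maintenance.
 *		*ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
 *		local_flush_tlb_kernel_page(virt);
 *		return (void *)virt;
 *	}
 */
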
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */