xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintenance
Copy the arm64 code that uses the dma-direct/swiotlb helpers for DMA non-coherent devices.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
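The pattern copied from arm64 dispatches on whether a DMA address belongs to a page owned by the local domain: dom0 is mapped 1:1, so a locally valid pfn means the generic dma-direct helpers can do the cache maintenance, while foreign grant-mapped pages still go through the Xen-specific paths. A minimal sketch of that dispatch follows (same shape as the xen_dma_sync_single_for_cpu() added in the diff below; the include list is an assumption for illustration, not part of this commit):

/* Sketch of the local-vs-foreign dispatch this commit introduces. */
#include <linux/dma-direct.h>		/* dma_direct_sync_single_for_cpu() */
#include <linux/mm.h>			/* pfn_valid(), PFN_DOWN() */
#include <xen/arm/page-coherent.h>	/* __xen_dma_sync_single_for_cpu() */

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	/*
	 * Dom0 is mapped 1:1, so a pfn that is valid in this domain is a
	 * local page and the generic dma-direct helper can perform the
	 * cache maintenance; otherwise the page is a foreign grant
	 * mapping and needs the Xen-specific hypercall-backed path.
	 */
	if (pfn_valid(pfn))
		dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
	else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}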
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -14,9 +14,6 @@ struct dev_archdata {
 #endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping *mapping;
-#endif
-#ifdef CONFIG_XEN
-	const struct dma_map_ops *dev_dma_ops;
 #endif
 	unsigned int dma_coherent:1;
 	unsigned int dma_ops_setup:1;
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -6,23 +6,37 @@
 #include <asm/page.h>
 #include <xen/arm/page-coherent.h>
 
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-	if (dev && dev->archdata.dev_dma_ops)
-		return dev->archdata.dev_dma_ops;
-	return get_arch_dma_ops(NULL);
-}
-
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
-	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
 }
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
-	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+
+	if (pfn_valid(pfn))
+		dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+	else
+		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	if (pfn_valid(pfn))
+		dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+	else
+		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
@@ -36,17 +50,8 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	bool local = (page_pfn <= dev_pfn) &&
 		(dev_pfn - page_pfn < compound_pages);
 
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
 	if (local)
-		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+		dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
 	else
 		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 }
@@ -63,33 +68,10 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 	 * safely call the native dma_ops function, otherwise we call the xen
 	 * specific function.
 	 */
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->unmap_page)
-			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-	} else
+	if (pfn_valid(pfn))
+		dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+	else
 		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
 }
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
-			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1105,10 +1105,6 @@ static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
	 * 32-bit DMA.
	 * Use the generic dma-direct / swiotlb ops code in that case, as that
	 * handles bounce buffering for us.
-	 *
-	 * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
-	 * latter is also selected by the Xen code, but that code for now relies
-	 * on non-NULL dev_dma_ops. To be cleaned up later.
	 */
 	if (IS_ENABLED(CONFIG_ARM_LPAE))
 		return NULL;
@@ -2318,10 +2314,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	set_dma_ops(dev, dma_ops);
 
 #ifdef CONFIG_XEN
-	if (xen_initial_domain()) {
-		dev->archdata.dev_dma_ops = dev->dma_ops;
+	if (xen_initial_domain())
 		dev->dma_ops = xen_dma_ops;
-	}
 #endif
 	dev->archdata.dma_ops_setup = true;
 }
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -557,11 +557,6 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
-#ifdef CONFIG_ARM
-	if (xen_get_dma_ops(dev)->mmap)
-		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
-						    dma_addr, size, attrs);
-#endif
 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
@@ -574,21 +569,6 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t handle, size_t size,
 		unsigned long attrs)
 {
-#ifdef CONFIG_ARM
-	if (xen_get_dma_ops(dev)->get_sgtable) {
-#if 0
-	/*
-	 * This check verifies that the page belongs to the current domain and
-	 * is not one mapped from another domain.
-	 * This check is for debug only, and should not go to production build
-	 */
-	unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
-	BUG_ON (!page_is_ram(bfn));
-#endif
-		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
-						handle, size, attrs);
-	}
-#endif
 	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
 }
 