Merge tag 'for-linus-5.9-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - two trivial comment fixes

 - a small series for the Xen balloon driver fixing some issues

 - a series for the Xen privcmd driver targeting elimination of
   get_user_pages*() usage in this driver

 - a series for the Xen swiotlb driver cleaning it up and adding support
   for letting the kernel run as dom0 on RPi4

* tag 'for-linus-5.9-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/arm: call dma_to_phys on the dma_addr_t parameter of dma_cache_maint
  xen/arm: introduce phys/dma translations in xen_dma_sync_for_*
  swiotlb-xen: introduce phys_to_dma/dma_to_phys translations
  swiotlb-xen: remove XEN_PFN_PHYS
  swiotlb-xen: add struct device * parameter to is_xen_swiotlb_buffer
  swiotlb-xen: add struct device * parameter to xen_dma_sync_for_device
  swiotlb-xen: add struct device * parameter to xen_dma_sync_for_cpu
  swiotlb-xen: add struct device * parameter to xen_bus_to_phys
  swiotlb-xen: add struct device * parameter to xen_phys_to_bus
  swiotlb-xen: remove start_dma_addr
  swiotlb-xen: use vmalloc_to_page on vmalloc virt addresses
  Revert "xen/balloon: Fix crash when ballooning on x86 32 bit PAE"
  xen/balloon: make the balloon wait interruptible
  xen/balloon: fix accounting in alloc_xenballooned_pages error path
  xen: hypercall.h: fix duplicated word
  xen/gntdev: gntdev.h: drop a duplicated word
  xen/privcmd: Convert get_user_pages*() to pin_user_pages*()
  xen/privcmd: Mark pages as dirty
  xen/privcmd: Corrected error handling path
This commit is contained in:
Linus Torvalds
2020-08-07 10:53:02 -07:00
8 changed files with 120 additions and 106 deletions

View File

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h> #include <linux/dma-noncoherent.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/highmem.h> #include <linux/highmem.h>
@@ -42,15 +43,18 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
static bool hypercall_cflush = false; static bool hypercall_cflush = false;
/* buffers in highmem or foreign pages cannot cross page boundaries */ /* buffers in highmem or foreign pages cannot cross page boundaries */
static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op) static void dma_cache_maint(struct device *dev, dma_addr_t handle,
size_t size, u32 op)
{ {
struct gnttab_cache_flush cflush; struct gnttab_cache_flush cflush;
cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;
cflush.offset = xen_offset_in_page(handle); cflush.offset = xen_offset_in_page(handle);
cflush.op = op; cflush.op = op;
handle &= XEN_PAGE_MASK;
do { do {
cflush.a.dev_bus_addr = dma_to_phys(dev, handle);
if (size + cflush.offset > XEN_PAGE_SIZE) if (size + cflush.offset > XEN_PAGE_SIZE)
cflush.length = XEN_PAGE_SIZE - cflush.offset; cflush.length = XEN_PAGE_SIZE - cflush.offset;
else else
@@ -59,7 +63,7 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1); HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
cflush.offset = 0; cflush.offset = 0;
cflush.a.dev_bus_addr += cflush.length; handle += cflush.length;
size -= cflush.length; size -= cflush.length;
} while (size); } while (size);
} }
@@ -71,24 +75,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
* pfn_valid returns true the pages is local and we can use the native * pfn_valid returns true the pages is local and we can use the native
* dma-direct functions, otherwise we call the Xen specific version. * dma-direct functions, otherwise we call the Xen specific version.
*/ */
void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size, void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
enum dma_data_direction dir) size_t size, enum dma_data_direction dir)
{ {
if (pfn_valid(PFN_DOWN(handle))) if (dir != DMA_TO_DEVICE)
arch_sync_dma_for_cpu(paddr, size, dir); dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
else if (dir != DMA_TO_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
} }
void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size, void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
enum dma_data_direction dir) size_t size, enum dma_data_direction dir)
{ {
if (pfn_valid(PFN_DOWN(handle))) if (dir == DMA_FROM_DEVICE)
arch_sync_dma_for_device(paddr, size, dir); dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
else if (dir == DMA_FROM_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
else else
dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN); dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
} }
bool xen_arch_need_swiotlb(struct device *dev, bool xen_arch_need_swiotlb(struct device *dev,
@@ -96,7 +96,7 @@ bool xen_arch_need_swiotlb(struct device *dev,
dma_addr_t dev_addr) dma_addr_t dev_addr)
{ {
unsigned int xen_pfn = XEN_PFN_DOWN(phys); unsigned int xen_pfn = XEN_PFN_DOWN(phys);
unsigned int bfn = XEN_PFN_DOWN(dev_addr); unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));
/* /*
* The swiotlb buffer should be used if * The swiotlb buffer should be used if

View File

@@ -82,7 +82,7 @@ struct xen_dm_op_buf;
* - clobber the rest * - clobber the rest
* *
* The result certainly isn't pretty, and it really shows up cpp's * The result certainly isn't pretty, and it really shows up cpp's
* weakness as as macro language. Sorry. (But let's just give thanks * weakness as a macro language. Sorry. (But let's just give thanks
* there aren't more than 5 arguments...) * there aren't more than 5 arguments...)
*/ */

View File

@@ -266,20 +266,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL; return NULL;
} }
#ifdef CONFIG_SPARSEMEM
{
unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
unsigned long pfn = res->start >> PAGE_SHIFT;
if (pfn > limit) {
pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
pfn, limit);
release_memory_resource(res);
return NULL;
}
}
#endif
return res; return res;
} }
@@ -568,11 +554,13 @@ static int add_ballooned_pages(int nr_pages)
if (xen_hotplug_unpopulated) { if (xen_hotplug_unpopulated) {
st = reserve_additional_memory(); st = reserve_additional_memory();
if (st != BP_ECANCELED) { if (st != BP_ECANCELED) {
int rc;
mutex_unlock(&balloon_mutex); mutex_unlock(&balloon_mutex);
wait_event(balloon_wq, rc = wait_event_interruptible(balloon_wq,
!list_empty(&ballooned_pages)); !list_empty(&ballooned_pages));
mutex_lock(&balloon_mutex); mutex_lock(&balloon_mutex);
return 0; return rc ? -ENOMEM : 0;
} }
} }
@@ -630,6 +618,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
out_undo: out_undo:
mutex_unlock(&balloon_mutex); mutex_unlock(&balloon_mutex);
free_xenballooned_pages(pgno, pages); free_xenballooned_pages(pgno, pages);
/*
* NB: free_xenballooned_pages will only subtract pgno pages, but since
* target_unpopulated is incremented with nr_pages at the start we need
* to remove the remaining ones also, or accounting will be screwed.
*/
balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret; return ret;
} }
EXPORT_SYMBOL(alloc_xenballooned_pages); EXPORT_SYMBOL(alloc_xenballooned_pages);

View File

@@ -580,13 +580,13 @@ out_unlock:
static int lock_pages( static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num, struct privcmd_dm_op_buf kbufs[], unsigned int num,
struct page *pages[], unsigned int nr_pages) struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{ {
unsigned int i; unsigned int i;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
unsigned int requested; unsigned int requested;
int pinned; int page_count;
requested = DIV_ROUND_UP( requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size, offset_in_page(kbufs[i].uptr) + kbufs[i].size,
@@ -594,14 +594,15 @@ static int lock_pages(
if (requested > nr_pages) if (requested > nr_pages)
return -ENOSPC; return -ENOSPC;
pinned = get_user_pages_fast( page_count = pin_user_pages_fast(
(unsigned long) kbufs[i].uptr, (unsigned long) kbufs[i].uptr,
requested, FOLL_WRITE, pages); requested, FOLL_WRITE, pages);
if (pinned < 0) if (page_count < 0)
return pinned; return page_count;
nr_pages -= pinned; *pinned += page_count;
pages += pinned; nr_pages -= page_count;
pages += page_count;
} }
return 0; return 0;
@@ -609,15 +610,7 @@ static int lock_pages(
static void unlock_pages(struct page *pages[], unsigned int nr_pages) static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{ {
unsigned int i; unpin_user_pages_dirty_lock(pages, nr_pages, true);
if (!pages)
return;
for (i = 0; i < nr_pages; i++) {
if (pages[i])
put_page(pages[i]);
}
} }
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata) static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
@@ -630,6 +623,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
struct xen_dm_op_buf *xbufs = NULL; struct xen_dm_op_buf *xbufs = NULL;
unsigned int i; unsigned int i;
long rc; long rc;
unsigned int pinned = 0;
if (copy_from_user(&kdata, udata, sizeof(kdata))) if (copy_from_user(&kdata, udata, sizeof(kdata)))
return -EFAULT; return -EFAULT;
@@ -683,9 +677,11 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
goto out; goto out;
} }
rc = lock_pages(kbufs, kdata.num, pages, nr_pages); rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
if (rc) if (rc < 0) {
nr_pages = pinned;
goto out; goto out;
}
for (i = 0; i < kdata.num; i++) { for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr); set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);

View File

@@ -52,37 +52,39 @@ static unsigned long xen_io_tlb_nslabs;
* Quick lookup value of the bus address of the IOTLB. * Quick lookup value of the bus address of the IOTLB.
*/ */
static u64 start_dma_addr; static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
/*
* Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
* can be 32bit when dma_addr_t is 64bit leading to a loss in
* information if the shift is done before casting to 64bit.
*/
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{ {
unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr)); unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT; phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;
dma |= paddr & ~XEN_PAGE_MASK; baddr |= paddr & ~XEN_PAGE_MASK;
return baddr;
return dma;
} }
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}
static inline phys_addr_t xen_bus_to_phys(struct device *dev,
phys_addr_t baddr)
{ {
unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr)); unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT; phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
phys_addr_t paddr = dma; (baddr & ~XEN_PAGE_MASK);
paddr |= baddr & ~XEN_PAGE_MASK;
return paddr; return paddr;
} }
static inline dma_addr_t xen_virt_to_bus(void *address) static inline phys_addr_t xen_dma_to_phys(struct device *dev,
dma_addr_t dma_addr)
{ {
return xen_phys_to_bus(virt_to_phys(address)); return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
{
return xen_phys_to_dma(dev, virt_to_phys(address));
} }
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
@@ -99,11 +101,11 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
return 0; return 0;
} }
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{ {
unsigned long bfn = XEN_PFN_DOWN(dma_addr); unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
unsigned long xen_pfn = bfn_to_local_pfn(bfn); unsigned long xen_pfn = bfn_to_local_pfn(bfn);
phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn); phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;
/* If the address is outside our domain, it CAN /* If the address is outside our domain, it CAN
* have the same virtual address as another address * have the same virtual address as another address
@@ -241,7 +243,6 @@ retry:
m_ret = XEN_SWIOTLB_EFIXUP; m_ret = XEN_SWIOTLB_EFIXUP;
goto error; goto error;
} }
start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
if (early) { if (early) {
if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
verbose)) verbose))
@@ -307,12 +308,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (hwdev && hwdev->coherent_dma_mask) if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask; dma_mask = hwdev->coherent_dma_mask;
/* At this point dma_handle is the physical address, next we are /* At this point dma_handle is the dma address, next we are
* going to set it to the machine address. * going to set it to the machine address.
* Do not use virt_to_phys(ret) because on ARM it doesn't correspond * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
* to *dma_handle. */ * to *dma_handle. */
phys = *dma_handle; phys = dma_to_phys(hwdev, *dma_handle);
dev_addr = xen_phys_to_bus(phys); dev_addr = xen_phys_to_dma(hwdev, phys);
if (((dev_addr + size - 1 <= dma_mask)) && if (((dev_addr + size - 1 <= dma_mask)) &&
!range_straddles_page_boundary(phys, size)) !range_straddles_page_boundary(phys, size))
*dma_handle = dev_addr; *dma_handle = dev_addr;
@@ -322,6 +323,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL; return NULL;
} }
*dma_handle = phys_to_dma(hwdev, *dma_handle);
SetPageXenRemapped(virt_to_page(ret)); SetPageXenRemapped(virt_to_page(ret));
} }
memset(ret, 0, size); memset(ret, 0, size);
@@ -335,23 +337,30 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
int order = get_order(size); int order = get_order(size);
phys_addr_t phys; phys_addr_t phys;
u64 dma_mask = DMA_BIT_MASK(32); u64 dma_mask = DMA_BIT_MASK(32);
struct page *page;
if (hwdev && hwdev->coherent_dma_mask) if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask; dma_mask = hwdev->coherent_dma_mask;
/* do not use virt_to_phys because on ARM it doesn't return you the /* do not use virt_to_phys because on ARM it doesn't return you the
* physical address */ * physical address */
phys = xen_bus_to_phys(dev_addr); phys = xen_dma_to_phys(hwdev, dev_addr);
/* Convert the size to actually allocated. */ /* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT); size = 1UL << (order + XEN_PAGE_SHIFT);
if (is_vmalloc_addr(vaddr))
page = vmalloc_to_page(vaddr);
else
page = virt_to_page(vaddr);
if (!WARN_ON((dev_addr + size - 1 > dma_mask) || if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
range_straddles_page_boundary(phys, size)) && range_straddles_page_boundary(phys, size)) &&
TestClearPageXenRemapped(virt_to_page(vaddr))) TestClearPageXenRemapped(page))
xen_destroy_contiguous_region(phys, order); xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
attrs);
} }
/* /*
@@ -367,7 +376,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long attrs) unsigned long attrs)
{ {
phys_addr_t map, phys = page_to_phys(page) + offset; phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = xen_phys_to_bus(phys); dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
/* /*
@@ -386,13 +395,13 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/ */
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
size, size, dir, attrs); phys, size, size, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR) if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
phys = map; phys = map;
dev_addr = xen_phys_to_bus(map); dev_addr = xen_phys_to_dma(dev, map);
/* /*
* Ensure that the address returned is DMA'ble * Ensure that the address returned is DMA'ble
@@ -404,8 +413,12 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
} }
done: done:
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
xen_dma_sync_for_device(dev_addr, phys, size, dir); if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
arch_sync_dma_for_device(phys, size, dir);
else
xen_dma_sync_for_device(dev, dev_addr, size, dir);
}
return dev_addr; return dev_addr;
} }
@@ -420,15 +433,19 @@ done:
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs) size_t size, enum dma_data_direction dir, unsigned long attrs)
{ {
phys_addr_t paddr = xen_bus_to_phys(dev_addr); phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
xen_dma_sync_for_cpu(dev_addr, paddr, size, dir); if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
arch_sync_dma_for_cpu(paddr, size, dir);
else
xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
}
/* NOTE: We use dev_addr here, not paddr! */ /* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) if (is_xen_swiotlb_buffer(hwdev, dev_addr))
swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs); swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
} }
@@ -436,12 +453,16 @@ static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir) size_t size, enum dma_data_direction dir)
{ {
phys_addr_t paddr = xen_bus_to_phys(dma_addr); phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
if (!dev_is_dma_coherent(dev)) if (!dev_is_dma_coherent(dev)) {
xen_dma_sync_for_cpu(dma_addr, paddr, size, dir); if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
arch_sync_dma_for_cpu(paddr, size, dir);
else
xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
}
if (is_xen_swiotlb_buffer(dma_addr)) if (is_xen_swiotlb_buffer(dev, dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
} }
@@ -449,13 +470,17 @@ static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir) size_t size, enum dma_data_direction dir)
{ {
phys_addr_t paddr = xen_bus_to_phys(dma_addr); phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
if (is_xen_swiotlb_buffer(dma_addr)) if (is_xen_swiotlb_buffer(dev, dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev)) if (!dev_is_dma_coherent(dev)) {
xen_dma_sync_for_device(dma_addr, paddr, size, dir); if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
arch_sync_dma_for_device(paddr, size, dir);
else
xen_dma_sync_for_device(dev, dma_addr, size, dir);
}
} }
/* /*
@@ -536,7 +561,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{ {
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask; return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
} }
const struct dma_map_ops xen_swiotlb_dma_ops = { const struct dma_map_ops xen_swiotlb_dma_ops = {

View File

@@ -66,7 +66,7 @@ struct ioctl_gntdev_map_grant_ref {
/* /*
* Removes the grant references from the mapping table of an instance of * Removes the grant references from the mapping table of an instance of
* of gntdev. N.B. munmap() must be called on the relevant virtual address(es) * gntdev. N.B. munmap() must be called on the relevant virtual address(es)
* before this ioctl is called, or an error will result. * before this ioctl is called, or an error will result.
*/ */
#define IOCTL_GNTDEV_UNMAP_GRANT_REF \ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \

View File

@@ -24,7 +24,6 @@
#define XEN_PFN_DOWN(x) ((x) >> XEN_PAGE_SHIFT) #define XEN_PFN_DOWN(x) ((x) >> XEN_PAGE_SHIFT)
#define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT) #define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT)
#define XEN_PFN_PHYS(x) ((phys_addr_t)(x) << XEN_PAGE_SHIFT)
#include <asm/xen/page.h> #include <asm/xen/page.h>

View File

@@ -4,10 +4,10 @@
#include <linux/swiotlb.h> #include <linux/swiotlb.h>
void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size, void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
enum dma_data_direction dir); size_t size, enum dma_data_direction dir);
void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size, void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
enum dma_data_direction dir); size_t size, enum dma_data_direction dir);
extern int xen_swiotlb_init(int verbose, bool early); extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops; extern const struct dma_map_ops xen_swiotlb_dma_ops;