ARC: dma: ioremap: use phys_addr_t consistently in code paths

To support DMA in physical memory beyond 4GB with PAE40.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>

commit f5db19e93f
parent 971573cf57
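The core of the change is a type switch. As a minimal, hypothetical userspace sketch (not from the patch; it assumes a 32-bit target like ARC where unsigned long is 32 bits), this is the truncation the patch avoids by carrying physical addresses in phys_addr_t:

#include <inttypes.h>
#include <stdio.h>

/* Stand-in for the kernel type: with CONFIG_ARC_HAS_PAE40 the kernel's
 * phys_addr_t is 64 bit even though the CPU registers are 32 bit. */
typedef uint64_t phys_addr_t;

int main(void)
{
	phys_addr_t paddr = 0x123450000ULL;             /* 40-bit physical address, above 4GB */
	unsigned long as_ulong = (unsigned long)paddr;  /* 32 bits on a 32-bit build */

	printf("phys_addr_t  : 0x%" PRIx64 "\n", (uint64_t)paddr); /* 0x123450000 */
	printf("unsigned long: 0x%lx\n", as_ulong);     /* 0x23450000 on 32-bit: truncated */
	return 0;
}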
arch/arc/include/asm/cacheflush.h

@@ -40,9 +40,9 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 
 void flush_dcache_page(struct page *page);
 
-void dma_cache_wback_inv(unsigned long start, unsigned long sz);
-void dma_cache_inv(unsigned long start, unsigned long sz);
-void dma_cache_wback(unsigned long start, unsigned long sz);
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
arch/arc/include/asm/io.h

@@ -13,8 +13,8 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
-extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 				  unsigned long flags);
 extern void iounmap(const void __iomem *addr);
 
arch/arc/mm/cache.c

@@ -28,9 +28,9 @@ volatile int slc_enable = 1, ioc_enable = 1;
 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
 			       unsigned long sz, const int cacheop);
 
-void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_wback)(unsigned long start, unsigned long sz);
+void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
 
 char *arc_cache_mumbojumbo(int c, char *buf, int len)
 {
@@ -633,17 +633,17 @@ EXPORT_SYMBOL(flush_dcache_page);
  * DMA ops for systems with L1 cache only
  * Make memory coherent with L1 cache by flushing/invalidating L1 lines
  */
-static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 }
@@ -652,19 +652,19 @@ static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
  * DMA ops for systems with both L1 and L2 caches, but without IOC
  * Both L1 and L2 lines need to be explicitly flushed/invalidated
  */
-static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 	slc_op(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 	slc_op(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 	slc_op(start, sz, OP_FLUSH);
@@ -675,26 +675,26 @@ static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
  * IOC hardware snoops all DMA traffic keeping the caches consistent with
  * memory - eliding need for any explicit cache maintenance of DMA buffers
  */
-static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}
+static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
 
 /*
  * Exported DMA API
  */
-void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
-void dma_cache_inv(unsigned long start, unsigned long sz)
+void dma_cache_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
-void dma_cache_wback(unsigned long start, unsigned long sz)
+void dma_cache_wback(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback(start, sz);
 }
arch/arc/mm/dma.c

@@ -65,13 +65,13 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
 	if (need_kvaddr) {
-		kvaddr = ioremap_nocache((unsigned long)paddr, size);
+		kvaddr = ioremap_nocache(paddr, size);
 		if (kvaddr == NULL) {
 			__free_pages(page, order);
 			return NULL;
 		}
 	} else {
-		kvaddr = (void *)paddr;
+		kvaddr = (void *)(u32)paddr;
 	}
 
 	/*
@@ -85,7 +85,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 * will be optimized as a separate commit
 	 */
 	if (need_coh)
-		dma_cache_wback_inv((unsigned long)paddr, size);
+		dma_cache_wback_inv(paddr, size);
 
 	return kvaddr;
 }
@@ -110,7 +110,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
  * CPU accesses page via normal paddr, thus needs to explicitly made
  * consistent before each use
  */
-static void _dma_cache_sync(unsigned long paddr, size_t size,
+static void _dma_cache_sync(phys_addr_t paddr, size_t size,
 			    enum dma_data_direction dir)
 {
 	switch (dir) {
@@ -124,7 +124,7 @@ static void _dma_cache_sync(unsigned long paddr, size_t size,
 		dma_cache_wback_inv(paddr, size);
 		break;
 	default:
-		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
 	}
 }
 
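Side note on the pr_err() change above: the kernel's %pa printk extension takes a pointer to a phys_addr_t and prints its full value, whereas %lx would truncate to 32 bits on ARC. A minimal, hypothetical helper (not part of the patch) showing the by-reference convention:

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper, not part of the patch: %pa dereferences the pointer
 * it is given, so the format string works whether phys_addr_t is 32 or
 * 64 bit (i.e. with or without PAE40). */
static inline void dump_dma_paddr(const char *what, phys_addr_t paddr)
{
	pr_info("%s @ %pa\n", what, &paddr);
}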
@@ -132,7 +132,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	unsigned long paddr = page_to_phys(page) + offset;
+	phys_addr_t paddr = page_to_phys(page) + offset;
 	_dma_cache_sync(paddr, size, dir);
 	return (dma_addr_t)paddr;
 }
arch/arc/mm/ioremap.c

@@ -14,18 +14,21 @@
 #include <linux/slab.h>
 #include <linux/cache.h>
 
-void __iomem *ioremap(unsigned long paddr, unsigned long size)
+void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
 {
-	unsigned long end;
+	phys_addr_t end;
 
 	/* Don't allow wraparound or zero size */
 	end = paddr + size - 1;
 	if (!size || (end < paddr))
 		return NULL;
 
-	/* If the region is h/w uncached, avoid MMU mappings */
+	/*
+	 * If the region is h/w uncached, MMU mapping can be elided as optim
+	 * The cast to u32 is fine as this region can only be inside 4GB
+	 */
 	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
-		return (void __iomem *)paddr;
+		return (void __iomem *)(u32)paddr;
 
 	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
 }
@@ -41,9 +44,9 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 			   unsigned long flags)
 {
-	void __iomem *vaddr;
+	unsigned long vaddr;
 	struct vm_struct *area;
-	unsigned long off, end;
+	phys_addr_t off, end;
 	pgprot_t prot = __pgprot(flags);
 
 	/* Don't allow wraparound, zero size */
@@ -70,9 +73,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 	if (!area)
 		return NULL;
 	area->phys_addr = paddr;
-	vaddr = (void __iomem *)area->addr;
-	if (ioremap_page_range((unsigned long)vaddr,
-			       (unsigned long)vaddr + size, paddr, prot)) {
+	vaddr = (unsigned long)area->addr;
+	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
 		vunmap((void __force *)vaddr);
 		return NULL;
 	}