powerpc/dma: Use the struct dma_attrs in iommu code

Update iommu_alloc() to take a struct dma_attrs pointer and pass it on to
tce_build(). This change propagates down to the tce_build() implementations
of all the platforms.

Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Authored by Mark Nelson on 2008-07-16 05:51:47 +10:00; committed by Benjamin Herrenschmidt
parent 4795b7801b
commit 4f3dd8a062
7 changed files with 28 additions and 16 deletions
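
Not part of this patch, but useful context for the diff below: once ppc_md.tce_build() receives the attributes, a driver-supplied hint can travel the whole mapping path. A minimal caller-side sketch, assuming the linux/dma-attrs.h helpers and the DMA_ATTR_WEAK_ORDERING attribute added elsewhere in this patch series; the function name, buffer and length are illustrative only:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* Illustrative caller: maps 'buf' for device DMA with a relaxed-ordering
 * hint.  After this patch the hint travels dma_map_single_attrs() ->
 * iommu_map_single() -> iommu_alloc() -> ppc_md.tce_build(). */
dma_addr_t map_buf_weakly_ordered(struct device *dev, void *buf, size_t len)
{
	DEFINE_DMA_ATTRS(attrs);		/* zero-initialised attribute set */

	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}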

arch/powerpc/kernel/iommu.c

@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      void *page, unsigned int npages,
 			      enum dma_data_direction direction,
-			      unsigned long mask, unsigned int align_order)
+			      unsigned long mask, unsigned int align_order,
+			      struct dma_attrs *attrs)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	/* Put the TCEs in the HW table */
 	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction);
+			 direction, attrs);
 
 	/* Flush/invalidate TLB caches if necessary */
@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
+		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
+				 direction, attrs);
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
@@ -573,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, align);
+					 mask >> IOMMU_PAGE_SHIFT, align,
+					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
@@ -642,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	nio_pages = size >> IOMMU_PAGE_SHIFT;
 	io_order = get_iommu_order(size);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order);
+			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;

arch/powerpc/platforms/cell/iommu.c

@@ -173,7 +173,8 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
 }
 
 static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
-		unsigned long uaddr, enum dma_data_direction direction)
+		unsigned long uaddr, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
 	int i;
 	unsigned long *io_pte, base_pte;
@@ -519,7 +520,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	__set_bit(0, window->table.it_map);
 	tce_build_cell(&window->table, window->table.it_offset, 1,
-		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
+		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
 
 	window->table.it_hint = window->table.it_blocksize;
 
 	return window;

arch/powerpc/platforms/iseries/iommu.c

@@ -42,7 +42,8 @@
 #include <asm/iseries/iommu.h>
 
 static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
-		unsigned long uaddr, enum dma_data_direction direction)
+		unsigned long uaddr, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
 	u64 rc;
 	u64 tce, rpn;

arch/powerpc/platforms/pasemi/iommu.c

@@ -85,7 +85,8 @@ static int iommu_table_iobmap_inited;
 
 static void iobmap_build(struct iommu_table *tbl, long index,
 			 long npages, unsigned long uaddr,
-			 enum dma_data_direction direction)
+			 enum dma_data_direction direction,
+			 struct dma_attrs *attrs)
 {
 	u32 *ip;
 	u32 rpn;

arch/powerpc/platforms/pseries/iommu.c

@@ -50,7 +50,8 @@
 
 static void tce_build_pSeries(struct iommu_table *tbl, long index,
 			      long npages, unsigned long uaddr,
-			      enum dma_data_direction direction)
+			      enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
 {
 	u64 proto_tce;
 	u64 *tcep;
@@ -95,7 +96,8 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
 
 static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				long npages, unsigned long uaddr,
-				enum dma_data_direction direction)
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs)
 {
 	u64 rc;
 	u64 proto_tce, tce;
@@ -127,7 +129,8 @@ static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
 
 static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				     long npages, unsigned long uaddr,
-				     enum dma_data_direction direction)
+				     enum dma_data_direction direction,
+				     struct dma_attrs *attrs)
 {
 	u64 rc;
 	u64 proto_tce;
@@ -136,7 +139,8 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	long l, limit;
 
 	if (npages == 1) {
-		tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction);
+		tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+				    direction, attrs);
 		return;
 	}
@@ -150,7 +154,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		/* If allocation fails, fall back to the loop implementation */
 		if (!tcep) {
 			tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
-					    direction);
+					    direction, attrs);
 			return;
 		}
 		__get_cpu_var(tce_page) = tcep;

arch/powerpc/sysdev/dart_iommu.c

@@ -149,7 +149,8 @@ static void dart_flush(struct iommu_table *tbl)
 
 static void dart_build(struct iommu_table *tbl, long index,
 		       long npages, unsigned long uaddr,
-		       enum dma_data_direction direction)
+		       enum dma_data_direction direction,
+		       struct dma_attrs *attrs)
 {
 	unsigned int *dp;
 	unsigned int rpn;

include/asm-powerpc/machdep.h

@@ -80,7 +80,8 @@ struct machdep_calls {
 				    long index,
 				    long npages,
 				    unsigned long uaddr,
-				    enum dma_data_direction direction);
+				    enum dma_data_direction direction,
+				    struct dma_attrs *attrs);
 	void		(*tce_free)(struct iommu_table *tbl,
 				    long index,
 				    long npages);
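
The patch only threads the new argument through; none of the tce_build() implementations act on it yet. For reference, a hedged sketch of how a platform hook matching the updated machdep_calls prototype could consult the attributes. Everything prefixed example_, plus the EXAMPLE_TCE_STRICT_ORDER bit, is hypothetical; only the hook signature and the dma_get_attr()/DMA_ATTR_WEAK_ORDERING accessor reflect real kernel interfaces of the time:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>
#include <asm/page.h>

/* Hypothetical strict-ordering permission bit; real platforms have their
 * own TCE/IOPTE flag layouts. */
#define EXAMPLE_TCE_STRICT_ORDER	0x1ULL

/* Stand-in for a platform's "write one TCE entry" primitive. */
static void example_write_tce(struct iommu_table *tbl, long index,
			      unsigned long uaddr, u64 perms)
{
	((u64 *)tbl->it_base)[index] = (__pa(uaddr) & IOMMU_PAGE_MASK) | perms;
}

static void example_tce_build(struct iommu_table *tbl, long index,
			      long npages, unsigned long uaddr,
			      enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	u64 perms = EXAMPLE_TCE_STRICT_ORDER;

	/* dma_get_attr() tolerates attrs == NULL, which is what the
	 * iommu_alloc_coherent() and Cell pad-page call sites above pass. */
	if (dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		perms &= ~EXAMPLE_TCE_STRICT_ORDER;

	/* A real hook would also derive read/write permissions from
	 * 'direction'; that detail is elided here. */
	while (npages--) {
		example_write_tce(tbl, index++, uaddr, perms);
		uaddr += IOMMU_PAGE_SIZE;
	}
}

Registering such a hook is unchanged by this patch: the platform setup code still assigns it to ppc_md.tce_build, and iommu_alloc() calls it with whatever attrs the mapping path supplied.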