x86: export pci-nommu's alloc_coherent
This patch exports nommu_alloc_coherent (renamed dma_generic_alloc_coherent).
GART needs this function.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 9f6ac57729
parent afa9fdc2f5
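For context, the point of the export is that an IOMMU backend such as GART can delegate page allocation to this helper and then establish its own mapping. A minimal sketch of that pattern follows; gart_map_page_range() is a hypothetical stand-in for the real GART mapping path and is not part of this patch:

static void *gart_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	void *vaddr;

	/* Zeroed, mask-respecting pages from the generic helper. */
	vaddr = dma_generic_alloc_coherent(dev, size, dma_addr, flag);
	if (!vaddr)
		return NULL;

	/* Hypothetical helper: remap through the GART aperture when
	 * the physical address is outside the device's reach. */
	*dma_addr = gart_map_page_range(dev, virt_to_phys(vaddr), size);
	return vaddr;
}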
arch/x86/kernel/pci-dma.c:

@@ -134,6 +134,37 @@ unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
 EXPORT_SYMBOL(iommu_num_pages);
 #endif
 
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+				 dma_addr_t *dma_addr, gfp_t flag)
+{
+	unsigned long dma_mask;
+	struct page *page;
+	dma_addr_t addr;
+
+	dma_mask = dma_alloc_coherent_mask(dev, flag);
+
+	flag |= __GFP_ZERO;
+again:
+	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+	if (!page)
+		return NULL;
+
+	addr = page_to_phys(page);
+	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
+		__free_pages(page, get_order(size));
+
+		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
+			flag = (flag & ~GFP_DMA32) | GFP_DMA;
+			goto again;
+		}
+
+		return NULL;
+	}
+
+	*dma_addr = addr;
+	return page_address(page);
+}
+
 /*
  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
  * documentation.
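The retry logic above hinges on is_buffer_dma_capable(), which checks whether the whole buffer falls under the device's coherent DMA mask. Conceptually it is a bounds check like the following (an illustrative paraphrase, not a verbatim quote of the kernel header):

/* Illustrative paraphrase: the buffer is DMA-capable when its
 * last byte is still addressable under the given mask. */
static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
	return addr + size - 1 <= mask;
}

So when the first allocation lands above a sub-32-bit mask, the function frees the pages and retries once from ZONE_DMA; for masks of 32 bits or more it gives up and returns NULL.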
arch/x86/kernel/pci-nommu.c:

@@ -72,43 +72,6 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	return nents;
 }
 
-static void *
-nommu_alloc_coherent(struct device *hwdev, size_t size,
-		     dma_addr_t *dma_addr, gfp_t gfp)
-{
-	unsigned long dma_mask;
-	int node;
-	struct page *page;
-	dma_addr_t addr;
-
-	dma_mask = dma_alloc_coherent_mask(hwdev, gfp);
-
-	gfp |= __GFP_ZERO;
-
-	node = dev_to_node(hwdev);
-again:
-	page = alloc_pages_node(node, gfp, get_order(size));
-	if (!page)
-		return NULL;
-
-	addr = page_to_phys(page);
-	if (!is_buffer_dma_capable(dma_mask, addr, size) && !(gfp & GFP_DMA)) {
-		free_pages((unsigned long)page_address(page), get_order(size));
-		gfp |= GFP_DMA;
-		goto again;
-	}
-
-	if (check_addr("alloc_coherent", hwdev, addr, size)) {
-		*dma_addr = addr;
-		flush_write_buffers();
-		return page_address(page);
-	}
-
-	free_pages((unsigned long)page_address(page), get_order(size));
-
-	return NULL;
-}
-
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_addr)
 {
@@ -116,7 +79,7 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 }
 
 struct dma_mapping_ops nommu_dma_ops = {
-	.alloc_coherent = nommu_alloc_coherent,
+	.alloc_coherent = dma_generic_alloc_coherent,
 	.free_coherent = nommu_free_coherent,
 	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
include/asm-x86/dma-mapping.h:

@@ -89,6 +89,9 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_addr, gfp_t flag);
+
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
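The new declaration makes the helper visible to every dma_mapping_ops implementation. For orientation, the x86 allocation dispatch at this point looks roughly like the following (a simplified sketch, omitting the fallback-device and mask handling of the real code; not part of this patch):

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* After this patch, ->alloc_coherent may be
	 * dma_generic_alloc_coherent itself (nommu) or an
	 * IOMMU wrapper built on top of it (e.g. GART). */
	return ops->alloc_coherent(dev, size, dma_handle, gfp);
}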