iommu/intel: small map_page cleanup
Pass the page + offset to the low-level __intel_map_single helper (which gets renamed to __intel_map_page to fit the new calling conventions) as both callers have the page at hand. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
b3aa14f022
commit
964f2311a6
@@ -3597,9 +3597,11 @@ static int iommu_no_mapping(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
|
||||
size_t size, int dir, u64 dma_mask)
|
||||
static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, int dir,
|
||||
u64 dma_mask)
|
||||
{
|
||||
phys_addr_t paddr = page_to_phys(page) + offset;
|
||||
struct dmar_domain *domain;
|
||||
phys_addr_t start_paddr;
|
||||
unsigned long iova_pfn;
|
||||
@@ -3661,8 +3663,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return __intel_map_single(dev, page_to_phys(page) + offset, size,
|
||||
dir, *dev->dma_mask);
|
||||
return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
|
||||
}
|
||||
|
||||
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
|
||||
@@ -3753,9 +3754,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
|
||||
return NULL;
|
||||
memset(page_address(page), 0, size);
|
||||
|
||||
*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
|
||||
DMA_BIDIRECTIONAL,
|
||||
dev->coherent_dma_mask);
|
||||
*dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
|
||||
dev->coherent_dma_mask);
|
||||
if (*dma_handle)
|
||||
return page_address(page);
|
||||
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
|
||||
|
Loading…
Reference in New Issue
Block a user