arc: fix arc_dma_{map,unmap}_page
These functions should perform the same cache synchronization as calling
arc_dma_sync_single_for_{cpu,device} in addition to doing any required
address translation or mapping [1].  Ensure they actually do that by calling
arc_dma_sync_single_for_{cpu,device} instead of passing the dir argument
along to _dma_cache_sync.
The now unused _dma_cache_sync function is removed as well.
[1] in fact various drivers rely on that by passing DMA_ATTR_SKIP_CPU_SYNC
to the map/unmap routines and doing the cache synchronization manually.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Alexey Brodkin <abrodkin@synopsys.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
			
			
This commit is contained in:
		
							parent
							
								
									b591741072
								
							
						
					
					
						commit
						a8eb92d02d
					
				| @ -130,29 +130,6 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * streaming DMA Mapping API... | ||||
|  * CPU accesses page via normal paddr, thus needs to explicitly made | ||||
|  * consistent before each use | ||||
|  */ | ||||
| static void _dma_cache_sync(phys_addr_t paddr, size_t size, | ||||
| 		enum dma_data_direction dir) | ||||
| { | ||||
| 	switch (dir) { | ||||
| 	case DMA_FROM_DEVICE: | ||||
| 		dma_cache_inv(paddr, size); | ||||
| 		break; | ||||
| 	case DMA_TO_DEVICE: | ||||
| 		dma_cache_wback(paddr, size); | ||||
| 		break; | ||||
| 	case DMA_BIDIRECTIONAL: | ||||
| 		dma_cache_wback_inv(paddr, size); | ||||
| 		break; | ||||
| 	default: | ||||
| 		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void arc_dma_sync_single_for_device(struct device *dev, | ||||
| 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) | ||||
| { | ||||
| @ -185,7 +162,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, | ||||
| 	phys_addr_t paddr = page_to_phys(page) + offset; | ||||
| 
 | ||||
| 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||||
| 		_dma_cache_sync(paddr, size, dir); | ||||
| 		arc_dma_sync_single_for_device(dev, paddr, size, dir); | ||||
| 
 | ||||
| 	return paddr; | ||||
| } | ||||
| @ -205,7 +182,7 @@ static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle, | ||||
| 	phys_addr_t paddr = handle; | ||||
| 
 | ||||
| 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||||
| 		_dma_cache_sync(paddr, size, dir); | ||||
| 		arc_dma_sync_single_for_cpu(dev, paddr, size, dir); | ||||
| } | ||||
| 
 | ||||
| static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user