mirror of https://github.com/torvalds/linux.git
dma-mapping: zero memory returned from dma_alloc_*
If we want to map memory from the DMA allocator to userspace, it must be zeroed at allocation time to prevent stale data leaks. We already do this on most common architectures, but some architectures don't do it yet; fix them up, either by passing __GFP_ZERO when we use the normal page allocator, or by doing a manual memset otherwise.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Sam Ravnborg <sam@ravnborg.org> [sparc]
commit 518a2f1925
parent 6c503d0d88
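The patch applies one of two patterns per call site, depending on whether the backing allocator accepts gfp flags. A minimal sketch of both patterns in kernel C; the example_* names are hypothetical and not part of this commit:

/*
 * Sketch of the two zeroing patterns this commit applies; the
 * example_* names are hypothetical, not taken from the patch.
 */
#include <linux/gfp.h>
#include <linux/string.h>

/* Hypothetical backend that takes no gfp flags (compare pcxl_alloc_range
 * in the hunks below). */
extern void *example_backend_alloc(size_t size);

/* Pattern 1: the allocator takes gfp flags, so ask the page
 * allocator for pre-zeroed pages. */
static void *example_alloc_zeroed(gfp_t gfp, int order)
{
	return (void *)__get_free_pages(gfp | __GFP_ZERO, order);
}

/* Pattern 2: the backend takes no gfp flags (compare the
 * phys_to_virt()/memset() hunk below), so clear the buffer
 * manually after a successful allocation. */
static void *example_alloc_then_clear(size_t size)
{
	void *ret = example_backend_alloc(size);

	if (ret)
		memset(ret, 0, size);
	return ret;
}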
@@ -443,7 +443,7 @@ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
 	gfp &= ~GFP_DMA;
 
 try_again:
-	cpu_addr = (void *)__get_free_pages(gfp, order);
+	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
 	if (! cpu_addr) {
 		printk(KERN_INFO "pci_alloc_consistent: "
 		       "get_free_pages failed from %pf\n",
@@ -33,7 +33,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 */
 	BUG_ON(gfp & __GFP_HIGHMEM);
 
-	page = alloc_pages(gfp, order);
+	page = alloc_pages(gfp | __GFP_ZERO, order);
 	if (!page)
 		return NULL;
 
@@ -78,6 +78,7 @@ static void __free_dma_pages(u32 addr, int order)
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		gfp_t gfp, unsigned long attrs)
 {
+	void *ret;
 	u32 paddr;
 	int order;
 
@@ -94,7 +95,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (!paddr)
 		return NULL;
 
-	return phys_to_virt(paddr);
+	ret = phys_to_virt(paddr);
+	memset(ret, 0, 1 << order);
+	return ret;
 }
 
 /*
@@ -32,7 +32,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	page = alloc_pages(flag, order);
+	page = alloc_pages(flag | __GFP_ZERO, order);
 	if (!page)
 		return NULL;
 
@@ -81,7 +81,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	vaddr = __get_free_pages(gfp, order);
+	vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
 	if (!vaddr)
 		return NULL;
 
@@ -89,7 +89,7 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		.mm = &init_mm
 	};
 
-	page = alloc_pages_exact(size, gfp);
+	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
 	if (!page)
 		return NULL;
 
@@ -404,7 +404,7 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
 	order = get_order(size);
 	size = 1 << (order + PAGE_SHIFT);
 	vaddr = pcxl_alloc_range(size);
-	paddr = __get_free_pages(flag, order);
+	paddr = __get_free_pages(flag | __GFP_ZERO, order);
 	flush_kernel_dcache_range(paddr, size);
 	paddr = __pa(paddr);
 	map_uncached_pages(vaddr, size, paddr);
@@ -429,7 +429,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
 	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
 		return NULL;
 
-	addr = (void *)__get_free_pages(flag, get_order(size));
+	addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
 	if (addr)
 		*dma_handle = (dma_addr_t)virt_to_phys(addr);
 
@@ -404,7 +404,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t map;
 
 	size = PAGE_ALIGN(size);
-	page = alloc_pages(flag, get_order(size));
+	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
 	if (!page)
 		return NULL;
 
@@ -325,7 +325,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		return NULL;
 
 	size = PAGE_ALIGN(size);
-	va = (void *) __get_free_pages(gfp, get_order(size));
+	va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));
 	if (!va) {
 		printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT);
 		return NULL;
@@ -224,7 +224,7 @@ static void *iounit_alloc(struct device *dev, size_t len,
 		return NULL;
 
 	len = PAGE_ALIGN(len);
-	va = __get_free_pages(gfp, get_order(len));
+	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
 	if (!va)
 		return NULL;
 
@@ -344,7 +344,7 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
 		return NULL;
 
 	len = PAGE_ALIGN(len);
-	va = __get_free_pages(gfp, get_order(len));
+	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
 	if (va == 0)
 		return NULL;
 
@@ -160,7 +160,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 					 flag & __GFP_NOWARN);
 
 	if (!page)
-		page = alloc_pages(flag, get_order(size));
+		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
 
 	if (!page)
 		return NULL;
@@ -149,7 +149,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
 	struct scif_hw_dev *scdev = dev_get_drvdata(dev);
 	struct mic_device *mdev = scdev_to_mdev(scdev);
 	dma_addr_t tmp;
-	void *va = kmalloc(size, gfp);
+	void *va = kmalloc(size, gfp | __GFP_ZERO);
 
 	if (va) {
 		tmp = mic_map_single(mdev, va, size);
@@ -13,7 +13,7 @@ static void *dma_virt_alloc(struct device *dev, size_t size,
 {
 	void *ret;
 
-	ret = (void *)__get_free_pages(gfp, get_order(size));
+	ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
 	if (ret)
 		*dma_handle = (uintptr_t)ret;
 	return ret;
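Why the zeroing matters: once every dma_alloc_* path returns zeroed memory, remapping such a buffer to userspace cannot leak stale kernel data. A hedged sketch of a driver mmap handler relying on this; struct example_dev and its fields are hypothetical, assumed to hold the results of an earlier dma_alloc_coherent():

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state; none of these names are from the patch. */
struct example_dev {
	struct device *dma_dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_dev *edev = file->private_data;

	/* The buffer was zeroed at allocation time, so exposing it
	 * to userspace cannot leak stale kernel memory. */
	return dma_mmap_coherent(edev->dma_dev, vma, edev->cpu_addr,
				 edev->dma_handle,
				 vma->vm_end - vma->vm_start);
}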