dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages
When dma_direct_alloc_pages encounters a highmem page it currently just
gives up.  What it really should do is fall back to allocating the memory
with the page allocator instead - without this, platforms with a global
highmem CMA pool fail all dma_alloc_pages allocations.
Fixes: efa70f2fdc ("dma-mapping: add a new dma_alloc_pages API")
Reported-by: Mark O'Neill <mao@tumblingdice.co.uk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 92826e9675
parent 566fb90e05
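
For context (not part of the patch), here is a minimal sketch of the
caller-visible contract this change preserves; example_alloc is an
illustrative name, not something from the kernel tree.  dma_alloc_pages()
may only hand back pages that are addressable via page_address(), which is
why highmem CMA pages must be filtered out - after this change they are
retried through the page allocator instead of failing the allocation.

#include <linux/dma-mapping.h>

/* Hypothetical caller, for illustration only. */
static void *example_alloc(struct device *dev, size_t size, dma_addr_t *dma)
{
	struct page *page;

	page = dma_alloc_pages(dev, size, dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!page)
		return NULL;
	/* Safe: dma_alloc_pages() never returns highmem pages. */
	return page_address(page);
}
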
kernel/dma/direct.c
@@ -115,7 +115,7 @@ static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
 }
 
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-		gfp_t gfp)
+		gfp_t gfp, bool allow_highmem)
 {
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
@@ -129,9 +129,12 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
 	page = dma_alloc_contiguous(dev, size, gfp);
-	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, size);
-		page = NULL;
+	if (page) {
+		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
+		    (!allow_highmem && PageHighMem(page))) {
+			dma_free_contiguous(dev, page, size);
+			page = NULL;
+		}
 	}
 again:
 	if (!page)
@@ -189,7 +192,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 {
 	struct page *page;
 
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 
@@ -262,7 +265,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 
@@ -370,19 +373,9 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-	page = __dma_direct_alloc_pages(dev, size, gfp);
+	page = __dma_direct_alloc_pages(dev, size, gfp, false);
 	if (!page)
 		return NULL;
-	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here,
-		 * so log an error and fail.
-		 */
-		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		goto out_free_pages;
-	}
 
 	ret = page_address(page);
 	if (dma_set_decrypted(dev, ret, size))