mm/page_alloc: Add folio allocation functions
The __folio_alloc(), __folio_alloc_node() and folio_alloc() functions are
mostly for type safety, but they also ensure that the page allocator
allocates a compound page and initialises the deferred list if the page
is large enough to have one.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit cc09cb1341, parent 0d31125d2d
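
Before the diff, a quick illustration of the intended call pattern. This is
not part of the patch; it is a minimal caller sketch, assuming the
folio_put() and folio_address() helpers added elsewhere in the folio series:

	struct folio *folio;

	/* Allocate an order-2 (four page) compound folio on the local node. */
	folio = folio_alloc(GFP_KERNEL, 2);
	if (!folio)
		return -ENOMEM;

	/* ... use the memory, e.g. via folio_address(folio) ... */

	folio_put(folio);	/* drop the reference taken by the allocation */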
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -523,6 +523,8 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 
 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
+struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 				nodemask_t *nodemask, int nr_pages,
@@ -564,6 +566,15 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
+static inline
+struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
+{
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+	VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+
+	return __folio_alloc(gfp, order, nid, NULL);
+}
+
 /*
  * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
  * prefer the current CPU's closest node. Otherwise node must be valid and
@@ -580,6 +591,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc(gfp_t gfp, unsigned order);
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 		struct vm_area_struct *vma, unsigned long addr,
 		int node, bool hugepage);
@@ -590,6 +602,10 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	return alloc_pages_node(numa_node_id(), gfp_mask, order);
 }
+static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+{
+	return __folio_alloc_node(gfp, order, numa_node_id());
+}
 #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
 	alloc_pages(gfp_mask, order)
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
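
The VM_BUG_ON()/VM_WARN_ON() checks in __folio_alloc_node() mean callers
must pass a valid node id, and an online one when __GFP_THISNODE is set. A
hypothetical node-local caller might look like this (sketch only, not from
the patch):

	int nid = numa_mem_id();	/* nearest node that has memory */
	struct folio *folio;

	/* nid is known to be online here, so __GFP_THISNODE is safe. */
	folio = __folio_alloc_node(GFP_KERNEL | __GFP_THISNODE, 0, nid);
	if (!folio)
		return -ENOMEM;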
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2202,6 +2202,16 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
 }
 EXPORT_SYMBOL(alloc_pages);
 
+struct folio *folio_alloc(gfp_t gfp, unsigned order)
+{
+	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+
+	if (page && order > 1)
+		prep_transhuge_page(page);
+	return (struct folio *)page;
+}
+EXPORT_SYMBOL(folio_alloc);
+
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
 	struct mempolicy *pol = mpol_dup(vma_policy(src));
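
The `order > 1` test (rather than `order > 0`) is what the commit message
means by "large enough to have one": the deferred-split list_head is stored
in the second tail page, so only compound pages of order >= 2 have room for
it. For reference, prep_transhuge_page() in mm/huge_memory.c does roughly
the following at this point in the tree (paraphrased, not part of this
patch):

	void prep_transhuge_page(struct page *page)
	{
		/*
		 * The deferred-split list_head reuses fields of the
		 * second tail page, hence the order >= 2 requirement.
		 */
		INIT_LIST_HEAD(page_deferred_list(page));
		set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
	}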
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5400,6 +5400,18 @@ out:
 }
 EXPORT_SYMBOL(__alloc_pages);
 
+struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask)
+{
+	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
+			preferred_nid, nodemask);
+
+	if (page && order > 1)
+		prep_transhuge_page(page);
+	return (struct folio *)page;
+}
+EXPORT_SYMBOL(__folio_alloc);
+
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
  * address cannot represent highmem pages. Use alloc_pages and then kmap if
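
Taken together, the two exported helpers fold the __GFP_COMP flag and the
prep_transhuge_page() call into the allocation itself. A call site being
converted would change roughly like this (illustrative sketch, not from the
patch):

	/* before: open-coded compound-page allocation */
	page = alloc_pages(GFP_KERNEL | __GFP_COMP, order);
	if (page && order > 1)
		prep_transhuge_page(page);

	/* after: the folio helper does both steps and returns a typed pointer */
	folio = folio_alloc(GFP_KERNEL, order);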