mirror of
https://github.com/torvalds/linux.git
synced 2024-12-30 14:52:05 +00:00
mm/folio: replace set_compound_order with folio_set_order
The patch ("mm/folio: Avoid special handling for order value 0 in folio_set_order") [1] removed the need for special handling of order = 0 in folio_set_order. Now, folio_set_order and set_compound_order have become similar functions. This patch removes set_compound_order and uses folio_set_order instead. [1] https://lore.kernel.org/all/20230609183032.13E08C433D2@smtp.kernel.org/ Link: https://lkml.kernel.org/r/20230612093514.689846-1-tsahu@linux.ibm.com Signed-off-by: Tarun Sahu <tsahu@linux.ibm.com> Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com> Reviewed-by: Muchun Song <songmuchun@bytedance.com> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Mike Kravetz <mike.kravetz@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
0bb488498c
commit
1e3be4856f
@ -1232,16 +1232,6 @@ static inline void folio_set_compound_dtor(struct folio *folio,
|
||||
|
||||
void destroy_large_folio(struct folio *folio);
|
||||
|
||||
static inline void set_compound_order(struct page *page, unsigned int order)
|
||||
{
|
||||
struct folio *folio = (struct folio *)page;
|
||||
|
||||
folio->_folio_order = order;
|
||||
#ifdef CONFIG_64BIT
|
||||
folio->_folio_nr_pages = 1U << order;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Returns the number of bytes in this potentially compound page. */
|
||||
static inline unsigned long page_size(struct page *page)
|
||||
{
|
||||
|
@ -387,12 +387,27 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn,
|
||||
unsigned int order);
|
||||
extern void __free_pages_core(struct page *page, unsigned int order);
|
||||
|
||||
/*
|
||||
* This will have no effect, other than possibly generating a warning, if the
|
||||
* caller passes in a non-large folio.
|
||||
*/
|
||||
static inline void folio_set_order(struct folio *folio, unsigned int order)
|
||||
{
|
||||
if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
|
||||
return;
|
||||
|
||||
folio->_folio_order = order;
|
||||
#ifdef CONFIG_64BIT
|
||||
folio->_folio_nr_pages = 1U << order;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void prep_compound_head(struct page *page, unsigned int order)
|
||||
{
|
||||
struct folio *folio = (struct folio *)page;
|
||||
|
||||
folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
|
||||
set_compound_order(page, order);
|
||||
folio_set_order(folio, order);
|
||||
atomic_set(&folio->_entire_mapcount, -1);
|
||||
atomic_set(&folio->_nr_pages_mapped, 0);
|
||||
atomic_set(&folio->_pincount, 0);
|
||||
@ -432,21 +447,6 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
|
||||
int split_free_page(struct page *free_page,
|
||||
unsigned int order, unsigned long split_pfn_offset);
|
||||
|
||||
/*
|
||||
* This will have no effect, other than possibly generating a warning, if the
|
||||
* caller passes in a non-large folio.
|
||||
*/
|
||||
static inline void folio_set_order(struct folio *folio, unsigned int order)
|
||||
{
|
||||
if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
|
||||
return;
|
||||
|
||||
folio->_folio_order = order;
|
||||
#ifdef CONFIG_64BIT
|
||||
folio->_folio_nr_pages = 1U << order;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
||||
|
||||
/*
|
||||
|
Loading…
Reference in New Issue
Block a user