mm/page_owner: use order instead of nr in split_page_owner()
We do not have non-power-of-two pages; using nr is error prone if nr is not a power of two. Use the page order instead.

Link: https://lkml.kernel.org/r/20240226205534.1603748-5-zi.yan@sent.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michal Koutny <mkoutny@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 9a581c12cd
parent 502003bb76
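To illustrate the rationale, here is a minimal standalone C sketch (not kernel code; the names split_owner_by_nr() and split_owner_by_order() are hypothetical): when the callee takes an order and derives the page count itself, the count is a power of two by construction, whereas a raw nr parameter lets callers pass counts that can never correspond to a compound page.

/*
 * Minimal standalone sketch (not kernel code; names are hypothetical)
 * of the interface change below: deriving nr from the order inside the
 * callee guarantees a power-of-two page count by construction.
 */
#include <stdio.h>

/* Old shape: the caller passes a raw page count. Nothing prevents a
 * value such as 24, which can never be the size of a compound page. */
static void split_owner_by_nr(unsigned int nr)
{
	printf("split owner metadata across %u pages\n", nr);
}

/* New shape: the caller passes the order; the count is always 1 << order. */
static void split_owner_by_order(int order)
{
	unsigned int nr = 1u << order;	/* mirrors the new __split_page_owner() */

	printf("split owner metadata across %u pages\n", nr);
}

int main(void)
{
	split_owner_by_nr(24);		/* compiles, but 24 is not a valid compound-page size */
	split_owner_by_order(3);	/* always a power of two: 8 pages */
	return 0;
}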
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
 extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
 			unsigned short order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int nr);
+extern void __split_page_owner(struct page *page, int order);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
@@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
 	__set_page_owner(page, order, gfp_mask);
 }
 
-static inline void split_page_owner(struct page *page, unsigned int nr)
+static inline void split_page_owner(struct page *page, int order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__split_page_owner(page, nr);
+		__split_page_owner(page, order);
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
@@ -59,8 +59,7 @@ static inline void set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask)
 {
 }
-static inline void split_page_owner(struct page *page,
-			unsigned short order)
+static inline void split_page_owner(struct page *page, int order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2933,7 +2933,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	unlock_page_lruvec(lruvec);
 	/* Caller disabled irqs, so they are still disabled here */
 
-	split_page_owner(head, nr);
+	split_page_owner(head, order);
 
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2616,7 +2616,7 @@ void split_page(struct page *page, unsigned int order)
 
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
-	split_page_owner(page, 1 << order);
+	split_page_owner(page, order);
 	split_page_memcg(page, order);
 }
 EXPORT_SYMBOL_GPL(split_page);
@@ -4801,7 +4801,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		struct page *page = virt_to_page((void *)addr);
 		struct page *last = page + nr;
 
-		split_page_owner(page, 1 << order);
+		split_page_owner(page, order);
 		split_page_memcg(page, order);
 		while (page < --last)
 			set_page_refcounted(last);
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -306,11 +306,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 	page_ext_put(page_ext);
 }
 
-void __split_page_owner(struct page *page, unsigned int nr)
+void __split_page_owner(struct page *page, int order)
 {
 	int i;
 	struct page_ext *page_ext = page_ext_get(page);
 	struct page_owner *page_owner;
+	unsigned int nr = 1 << order;
 
 	if (unlikely(!page_ext))
 		return;