mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 21:33:00 +00:00
mm: uninline page_xchg_last_nid()
Andrew Morton pointed out that page_xchg_last_nid() and reset_page_last_nid() were "getting nuttily large" and asked that it be investigated. reset_page_last_nid() is on the page free path and it would be unfortunate to make that path more expensive than it needs to be. Due to the internal use of page_xchg_last_nid() it is already too expensive but fortunately, it should also be impossible for the page->flags to be updated in parallel when we call reset_page_last_nid(). Instead of uninlining the function, it uses a simpler implementation that assumes no parallel updates and should now be sufficiently short for inlining. page_xchg_last_nid() is called in paths that are already quite expensive (splitting huge page, fault handling, migration) and it is reasonable to uninline. There was not really a good place to place the function but mm/mmzone.c was the closest fit IMO. This patch saved 128 bytes of text in the vmlinux file for the kernel configuration I used for testing automatic NUMA balancing. Signed-off-by: Mel Gorman <mgorman@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
6acc8b0251
commit
4468b8f1e2
@ -677,25 +677,14 @@ static inline int page_last_nid(struct page *page)
|
||||
return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
|
||||
}
|
||||
|
||||
static inline int page_xchg_last_nid(struct page *page, int nid)
|
||||
{
|
||||
unsigned long old_flags, flags;
|
||||
int last_nid;
|
||||
|
||||
do {
|
||||
old_flags = flags = page->flags;
|
||||
last_nid = page_last_nid(page);
|
||||
|
||||
flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
|
||||
flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
|
||||
} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
|
||||
|
||||
return last_nid;
|
||||
}
|
||||
extern int page_xchg_last_nid(struct page *page, int nid);
|
||||
|
||||
static inline void reset_page_last_nid(struct page *page)
|
||||
{
|
||||
page_xchg_last_nid(page, (1 << LAST_NID_SHIFT) - 1);
|
||||
int nid = (1 << LAST_NID_SHIFT) - 1;
|
||||
|
||||
page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
|
||||
page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
|
||||
}
|
||||
#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
|
||||
#else
|
||||
|
20
mm/mmzone.c
20
mm/mmzone.c
@ -1,7 +1,7 @@
|
||||
/*
|
||||
* linux/mm/mmzone.c
|
||||
*
|
||||
* management codes for pgdats and zones.
|
||||
* management codes for pgdats, zones and page flags
|
||||
*/
|
||||
|
||||
|
||||
@ -96,3 +96,21 @@ void lruvec_init(struct lruvec *lruvec)
|
||||
for_each_lru(lru)
|
||||
INIT_LIST_HEAD(&lruvec->lists[lru]);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically record @nid as the last NUMA node to touch @page, storing it
 * in the LAST_NID field of page->flags.
 *
 * Returns the previous last-nid value so callers (huge page splitting,
 * fault handling, migration) can detect a node change.
 *
 * page->flags may be updated concurrently, so the read-modify-write is
 * retried with cmpxchg() until no other writer races with us.  The loop
 * is expected to succeed on the first pass, hence unlikely().
 */
int page_xchg_last_nid(struct page *page, int nid)
{
	unsigned long old_flags, flags;
	int last_nid;

	do {
		/* Snapshot flags; old_flags is the cmpxchg compare value. */
		old_flags = flags = page->flags;
		last_nid = page_last_nid(page);

		/* Clear the LAST_NID field, then insert the new nid. */
		flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
		flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_nid;
}
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user