mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 14:11:52 +00:00
mm: meminit: move page initialization into a separate function
Currently, memmap_init_zone() has all the smarts for initializing a single page. A subset of this is required for parallel page initialization and so this patch breaks up the monolithic function in preparation. Signed-off-by: Robin Holt <holt@sgi.com> Signed-off-by: Nathan Zimmer <nzimmer@sgi.com> Signed-off-by: Mel Gorman <mgorman@suse.de> Tested-by: Nate Zimmer <nzimmer@sgi.com> Tested-by: Waiman Long <waiman.long@hp.com> Tested-by: Daniel J Blueman <daniel@numascale.com> Acked-by: Pekka Enberg <penberg@kernel.org> Cc: Robin Holt <robinmholt@gmail.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Waiman Long <waiman.long@hp.com> Cc: Scott Norton <scott.norton@hp.com> Cc: "Luck, Tony" <tony.luck@intel.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
8e7a7f8619
commit
1e8ce83cd1
@ -764,6 +764,51 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Initialise a single struct page at early boot.
 *
 * @page: the struct page backing @pfn
 * @pfn:  page frame number being initialised
 * @zone: zone index within the node
 * @nid:  node id owning the page
 *
 * Sets the zone/node links, resets the refcount, mapcount and cpupid
 * fields, and marks the page reserved until the buddy allocator frees it.
 */
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	struct zone *zptr = &NODE_DATA(nid)->node_zones[zone];

	set_page_links(page, zone, nid, pfn);
	mminit_verify_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	SetPageReserved(page);

	/*
	 * Mark the block movable so that blocks are reserved for
	 * movable at startup. This will force kernel allocations
	 * to reserve their blocks rather than leaking throughout
	 * the address space during boot when many long-lived
	 * kernel allocations are made. Later some blocks near
	 * the start are marked MIGRATE_RESERVE by
	 * setup_zone_migrate_reserve().
	 *
	 * The bitmap is created for the zone's valid pfn range, but the
	 * memmap can be created for invalid pages (for alignment), so
	 * check here to avoid calling set_pageblock_migratetype()
	 * against a pfn outside the zone.
	 */
	if (zptr->zone_start_pfn <= pfn && pfn < zone_end_pfn(zptr) &&
	    !(pfn & (pageblock_nr_pages - 1)))
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}
|
||||||
|
|
||||||
|
/*
 * Initialise the struct page backing @pfn: thin wrapper around
 * __init_single_page() for callers that hold a pfn rather than a
 * struct page pointer.
 *
 * Fix: the original wrote "return __init_single_page(...);".  Both
 * functions are void, and returning a void expression from a void
 * function is a C constraint violation (C11 6.8.6.4p1) that compiles
 * only as a GNU extension; make it a plain call.
 */
static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
					int nid)
{
	__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}
|
||||||
|
|
||||||
static bool free_pages_prepare(struct page *page, unsigned int order)
|
static bool free_pages_prepare(struct page *page, unsigned int order)
|
||||||
{
|
{
|
||||||
bool compound = PageCompound(page);
|
bool compound = PageCompound(page);
|
||||||
@ -4212,7 +4257,6 @@ static void setup_zone_migrate_reserve(struct zone *zone)
|
|||||||
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
||||||
unsigned long start_pfn, enum memmap_context context)
|
unsigned long start_pfn, enum memmap_context context)
|
||||||
{
|
{
|
||||||
struct page *page;
|
|
||||||
unsigned long end_pfn = start_pfn + size;
|
unsigned long end_pfn = start_pfn + size;
|
||||||
unsigned long pfn;
|
unsigned long pfn;
|
||||||
struct zone *z;
|
struct zone *z;
|
||||||
@ -4233,38 +4277,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
|||||||
if (!early_pfn_in_nid(pfn, nid))
|
if (!early_pfn_in_nid(pfn, nid))
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
page = pfn_to_page(pfn);
|
__init_single_pfn(pfn, zone, nid);
|
||||||
set_page_links(page, zone, nid, pfn);
|
|
||||||
mminit_verify_page_links(page, zone, nid, pfn);
|
|
||||||
init_page_count(page);
|
|
||||||
page_mapcount_reset(page);
|
|
||||||
page_cpupid_reset_last(page);
|
|
||||||
SetPageReserved(page);
|
|
||||||
/*
|
|
||||||
* Mark the block movable so that blocks are reserved for
|
|
||||||
* movable at startup. This will force kernel allocations
|
|
||||||
* to reserve their blocks rather than leaking throughout
|
|
||||||
* the address space during boot when many long-lived
|
|
||||||
* kernel allocations are made. Later some blocks near
|
|
||||||
* the start are marked MIGRATE_RESERVE by
|
|
||||||
* setup_zone_migrate_reserve()
|
|
||||||
*
|
|
||||||
* bitmap is created for zone's valid pfn range. but memmap
|
|
||||||
* can be created for invalid pages (for alignment)
|
|
||||||
* check here not to call set_pageblock_migratetype() against
|
|
||||||
* pfn out of zone.
|
|
||||||
*/
|
|
||||||
if ((z->zone_start_pfn <= pfn)
|
|
||||||
&& (pfn < zone_end_pfn(z))
|
|
||||||
&& !(pfn & (pageblock_nr_pages - 1)))
|
|
||||||
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
|
|
||||||
|
|
||||||
INIT_LIST_HEAD(&page->lru);
|
|
||||||
#ifdef WANT_PAGE_VIRTUAL
|
|
||||||
/* The shift won't overflow because ZONE_NORMAL is below 4G. */
|
|
||||||
if (!is_highmem_idx(zone))
|
|
||||||
set_page_address(page, __va(pfn << PAGE_SHIFT));
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user