mm: introduce node_zonelist() for accessing the zonelist for a GFP mask
Introduce a node_zonelist() helper function. It is used to look up the appropriate zonelist given a node and a GFP mask. The patch on its own is a cleanup, but it helps clarify parts of the two-zonelist-per-node patchset. If necessary, it can be merged with the next patch in this set without problems.

Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 0e88460da6
parent dac1d27bc8
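The whole diff below is one mechanical substitution: every open-coded lookup of the form NODE_DATA(nid)->node_zonelists + gfp_zone(flags) (or the equivalent array indexing) becomes node_zonelist(nid, flags). As a minimal sketch of that idea, the standalone C program below reproduces the pointer arithmetic in userspace; struct zonelist, pg_data_t, gfp_zone(), MAX_NR_ZONES, and __GFP_HIGHMEM here are simplified stand-ins for the kernel's definitions, not the real ones.

#include <stdio.h>

#define MAX_NR_ZONES 4

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE };

typedef unsigned int gfp_t;
#define __GFP_HIGHMEM 0x02u

/* Stand-in zonelist: the real one holds a NULL-terminated zone array. */
struct zonelist {
	int nr_zones;
};

/* Stand-in per-node data: one zonelist per zone type, as in the kernel. */
typedef struct pglist_data {
	struct zonelist node_zonelists[MAX_NR_ZONES];
} pg_data_t;

static pg_data_t node_data[1];
#define NODE_DATA(nid) (&node_data[nid])

/* Stand-in for the kernel's gfp_zone(): map a GFP mask to a zone index. */
static enum zone_type gfp_zone(gfp_t flags)
{
	return (flags & __GFP_HIGHMEM) ? ZONE_HIGHMEM : ZONE_NORMAL;
}

/* The helper this patch introduces: (node, GFP mask) -> zonelist. */
static struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zone(flags);
}

int main(void)
{
	/* Before the patch: callers open-coded the indexing. */
	struct zonelist *open_coded =
		&NODE_DATA(0)->node_zonelists[gfp_zone(__GFP_HIGHMEM)];

	/* After the patch: the helper hides the lookup. */
	struct zonelist *via_helper = node_zonelist(0, __GFP_HIGHMEM);

	printf("same zonelist: %s\n",
	       open_coded == via_helper ? "yes" : "no");
	return 0;
}

Compiling and running this prints "same zonelist: yes", illustrating that the helper is a pure refactor of the existing lookup rather than a behavioural change.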
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -271,8 +271,7 @@ static struct sysrq_key_op sysrq_term_op = {
 
 static void moom_callback(struct work_struct *ignored)
 {
-	out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
-			GFP_KERNEL, 0);
+	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
 }
 
 static DECLARE_WORK(moom_work, moom_callback);
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -361,13 +361,13 @@ void invalidate_bdev(struct block_device *bdev)
 static void free_more_memory(void)
 {
 	struct zonelist *zonelist;
-	pg_data_t *pgdat;
+	int nid;
 
 	wakeup_pdflush(1024);
 	yield();
 
-	for_each_online_pgdat(pgdat) {
-		zonelist = &pgdat->node_zonelists[gfp_zone(GFP_NOFS)];
+	for_each_online_node(nid) {
+		zonelist = node_zonelist(nid, GFP_NOFS);
 		if (zonelist->zones[0])
 			try_to_free_pages(zonelist, 0, GFP_NOFS);
 	}
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -154,10 +154,15 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 /*
  * We get the zone list from the current node and the gfp_mask.
  * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * There are many zonelists per node, two for each active zone.
  *
  * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
  * optimized to &contig_page_data at compile-time.
  */
+static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+	return NODE_DATA(nid)->node_zonelists + gfp_zone(flags);
+}
 
 #ifndef HAVE_ARCH_FREE_PAGE
 static inline void arch_free_page(struct page *page, int order) { }
@@ -178,8 +183,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 	if (nid < 0)
 		nid = numa_node_id();
 
-	return __alloc_pages(gfp_mask, order,
-		NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
+	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
 #ifdef CONFIG_NUMA
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -241,7 +241,7 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
 {
-	return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
+	return node_zonelist(0, gfp_flags);
 }
 
 static inline int do_migrate_pages(struct mm_struct *mm,
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1183,7 +1183,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
 		nd = 0;
 		BUG();
 	}
-	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
+	return node_zonelist(nd, gfp);
 }
 
 /* Do dynamic interleaving for a process */
@@ -1299,7 +1299,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 		if (unlikely(pol != &default_policy &&
 				pol != current->mempolicy))
 			__mpol_free(pol);	/* finished with pol */
-		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
+		return node_zonelist(nid, gfp_flags);
 	}
 
 	zl = zonelist_policy(GFP_HIGHUSER, pol);
@@ -1321,7 +1321,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 	struct zonelist *zl;
 	struct page *page;
 
-	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
+	zl = node_zonelist(nid, gfp);
 	page = __alloc_pages(gfp, order, zl);
 	if (page && page_zone(page) == zl->zones[0])
 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1713,10 +1713,9 @@ EXPORT_SYMBOL(free_pages);
 static unsigned int nr_free_zone_pages(int offset)
 {
 	/* Just pick one node, since fallback list is circular */
-	pg_data_t *pgdat = NODE_DATA(numa_node_id());
 	unsigned int sum = 0;
 
-	struct zonelist *zonelist = pgdat->node_zonelists + offset;
+	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 	struct zone **zonep = zonelist->zones;
 	struct zone *zone;
 
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3249,8 +3249,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 	if (flags & __GFP_THISNODE)
 		return NULL;
 
-	zonelist = &NODE_DATA(slab_node(current->mempolicy))
-			->node_zonelists[gfp_zone(flags)];
+	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1309,8 +1309,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
-	zonelist = &NODE_DATA(
-		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
+	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	for (z = zonelist->zones; *z; z++) {
 		struct kmem_cache_node *n;
 