memcg: remove nid and zid argument from mem_cgroup_soft_limit_reclaim()
mem_cgroup_soft_limit_reclaim() takes zone, nid and zid arguments, but nid and zid can be calculated from zone, so remove them.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Nishimura Daisuke <d-nishimura@mtf.biglobe.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 00918b6ab8
parent 14fec79680
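The rationale is that a struct zone already identifies both its node and its position within that node, so passing nid and zid alongside the zone pointer is redundant. The sketch below is a minimal, self-contained userspace model (simplified stand-in types, not the kernel's real pglist_data/zone definitions) of how zone_to_nid()- and zone_idx()-style helpers recover both values from the zone alone, which is what the single-argument callers in this patch rely on.

#include <stdio.h>

#define MAX_NR_ZONES 4

/* Simplified stand-ins for the kernel's pglist_data and zone structures. */
struct pglist_data;

struct zone {
	struct pglist_data *zone_pgdat;	/* back-pointer to the owning node */
};

struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	int node_id;
};

/* Node id recovered through the zone's back-pointer to its pgdat. */
static int zone_to_nid(struct zone *zone)
{
	return zone->zone_pgdat->node_id;
}

/* Zone index recovered from the zone's offset inside node_zones[]. */
static int zone_idx(struct zone *zone)
{
	return (int)(zone - zone->zone_pgdat->node_zones);
}

int main(void)
{
	struct pglist_data node = { .node_id = 1 };
	struct zone *zone = &node.node_zones[2];

	node.node_zones[2].zone_pgdat = &node;

	/* Both values fall out of the zone pointer itself, so callers such
	 * as the balance_pgdat() loop no longer pass nid/zid explicitly. */
	printf("nid=%d zid=%d\n", zone_to_nid(zone), zone_idx(zone));
	return 0;
}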
include/linux/memcontrol.h
@@ -123,8 +123,7 @@ static inline bool mem_cgroup_disabled(void)
 void mem_cgroup_update_file_mapped(struct page *page, int val);
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
-						gfp_t gfp_mask, int nid,
-						int zid);
+						gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
@@ -301,7 +300,7 @@ static inline void mem_cgroup_update_file_mapped(struct page *page,
 
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
-					    gfp_t gfp_mask, int nid, int zid)
+					    gfp_t gfp_mask)
 {
 	return 0;
 }
mm/memcontrol.c
@@ -2865,8 +2865,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
-						gfp_t gfp_mask, int nid,
-						int zid)
+						gfp_t gfp_mask)
 {
 	unsigned long nr_reclaimed = 0;
 	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
@@ -2878,7 +2877,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 	if (order > 0)
 		return 0;
 
-	mctz = soft_limit_tree_node_zone(nid, zid);
+	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
 	/*
 	 * This loop can run a while, specially if mem_cgroup's continuously
 	 * keep exceeding their soft limit and putting the system under
mm/vmscan.c
@@ -2168,7 +2168,6 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
-			int nid, zid;
 
 			if (!populated_zone(zone))
 				continue;
@@ -2178,14 +2177,12 @@ loop_again:
 
 			sc.nr_scanned = 0;
 
-			nid = pgdat->node_id;
-			zid = zone_idx(zone);
 			/*
 			 * Call soft limit reclaim before calling shrink_zone.
 			 * For now we ignore the return value
 			 */
-			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
-							nid, zid);
+			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
+
 			/*
 			 * We put equal pressure on every zone, unless one
 			 * zone has way too many pages free already.