memory controller: soft limit organize cgroups
Organize cgroups that are over their soft limit in an RB-tree.

Introduce an RB-tree for storing memory cgroups that are over their soft limit. The overall goal is to:

1. Add a memory cgroup to the RB-tree when its soft limit is exceeded. We are careful about updates: updates take place only after a particular time interval has passed.
2. Remove the node from the RB-tree when usage goes back below the soft limit.

The next set of patches will exploit the RB-tree to find the group that is over its soft limit by the largest amount and reclaim from it when we face memory contention.

[hugh.dickins@tiscali.co.uk: CONFIG_CGROUP_MEM_RES_CTLR=y CONFIG_PREEMPT=y fails to boot]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 296c81d89f
commit f64c3f5494
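The commit message notes that follow-up patches will walk this tree to find the group that is over its soft limit by the largest amount. Because mem_cgroup_insert_exceeded() below keys the tree on usage_in_excess and sends equal-or-greater keys to the right, the rightmost node is always the biggest offender. A minimal sketch of how such a lookup could look (this helper is not part of this patch; the name and locking are assumptions based on the structures introduced below):

/*
 * Hypothetical sketch, not part of this patch: return the per-zone entry
 * that exceeds its soft limit by the largest amount.  Insertion pushes
 * equal-or-greater usage_in_excess to the right, so rb_last() is the
 * worst offender.
 */
static struct mem_cgroup_per_zone *
soft_limit_biggest_excess(struct mem_cgroup_tree_per_zone *mctz)
{
        struct rb_node *rightmost;
        struct mem_cgroup_per_zone *mz = NULL;

        spin_lock(&mctz->lock);
        rightmost = rb_last(&mctz->rb_root);
        if (rightmost)
                mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
        spin_unlock(&mctz->lock);
        return mz;
}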
include/linux/res_counter.h
@@ -114,7 +114,8 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 int __must_check res_counter_charge_locked(struct res_counter *counter,
                unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
-               unsigned long val, struct res_counter **limit_fail_at);
+               unsigned long val, struct res_counter **limit_fail_at,
+               struct res_counter **soft_limit_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -127,7 +128,8 @@ int __must_check res_counter_charge(struct res_counter *counter,
  */
 
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+void res_counter_uncharge(struct res_counter *counter, unsigned long val,
+                               bool *was_soft_limit_excess);
 
 static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
 {
kernel/res_counter.c
@@ -37,17 +37,27 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
 }
 
 int res_counter_charge(struct res_counter *counter, unsigned long val,
-                       struct res_counter **limit_fail_at)
+                       struct res_counter **limit_fail_at,
+                       struct res_counter **soft_limit_fail_at)
 {
        int ret;
        unsigned long flags;
        struct res_counter *c, *u;
 
        *limit_fail_at = NULL;
+       if (soft_limit_fail_at)
+               *soft_limit_fail_at = NULL;
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
                ret = res_counter_charge_locked(c, val);
+               /*
+                * With soft limits, we return the highest ancestor
+                * that exceeds its soft limit
+                */
+               if (soft_limit_fail_at &&
+                       !res_counter_soft_limit_check_locked(c))
+                       *soft_limit_fail_at = c;
                spin_unlock(&c->lock);
                if (ret < 0) {
                        *limit_fail_at = c;
@@ -75,7 +85,8 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
        counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge(struct res_counter *counter, unsigned long val,
+                               bool *was_soft_limit_excess)
 {
        unsigned long flags;
        struct res_counter *c;
@@ -83,6 +94,9 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
+               if (was_soft_limit_excess)
+                       *was_soft_limit_excess =
+                               !res_counter_soft_limit_check_locked(c);
                res_counter_uncharge_locked(c, val);
                spin_unlock(&c->lock);
        }
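Both new out-parameters are optional: a caller that does not care passes NULL, which is what most of the mm/memcontrol.c call sites below do. A rough caller-side sketch of the new contract (the charge_one_page() wrapper and note_soft_limit_excess() helper are made up for illustration; only the res_counter calls reflect this patch):

/* Illustrative only: how a charge path might consume the new out-parameters. */
static int charge_one_page(struct mem_cgroup *memcg)
{
        struct res_counter *fail_res, *soft_fail_res = NULL;

        if (res_counter_charge(&memcg->res, PAGE_SIZE, &fail_res,
                                &soft_fail_res))
                return -ENOMEM; /* fail_res is the counter that hit its hard limit */

        if (soft_fail_res)
                /* highest ancestor currently above its soft limit */
                note_soft_limit_excess(soft_fail_res);  /* hypothetical helper */

        /* on release: pass NULL when the soft-limit state is not needed */
        res_counter_uncharge(&memcg->res, PAGE_SIZE, NULL);
        return 0;
}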
mm/memcontrol.c
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/limits.h>
 #include <linux/mutex.h>
+#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
@@ -54,6 +55,7 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
 #endif
 
 static DEFINE_MUTEX(memcg_tasklist);   /* can be hold under cgroup_mutex */
+#define SOFTLIMIT_EVENTS_THRESH (1000)
 
 /*
  * Statistics for memory cgroup.
@@ -67,6 +69,7 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
+       MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
 
        MEM_CGROUP_STAT_NSTATS,
 };
@@ -79,6 +82,20 @@ struct mem_cgroup_stat {
        struct mem_cgroup_stat_cpu cpustat[0];
 };
 
+static inline void
+__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
+                               enum mem_cgroup_stat_index idx)
+{
+       stat->count[idx] = 0;
+}
+
+static inline s64
+__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
+                               enum mem_cgroup_stat_index idx)
+{
+       return stat->count[idx];
+}
+
 /*
  * For accounting under irq disable, no need for increment preempt count.
  */
@@ -118,6 +135,10 @@ struct mem_cgroup_per_zone {
        unsigned long           count[NR_LRU_LISTS];
 
        struct zone_reclaim_stat reclaim_stat;
+       struct rb_node          tree_node;      /* RB tree node */
+       unsigned long long      usage_in_excess;/* Set to the value by which */
+                                               /* the soft limit is exceeded*/
+       bool                    on_tree;
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)      ((mz)->count[(idx)])
@@ -130,6 +151,26 @@ struct mem_cgroup_lru_info {
        struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
 };
 
+/*
+ * Cgroups above their limits are maintained in a RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+       struct rb_root rb_root;
+       spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+       struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+       struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -215,6 +256,150 @@ static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
 
+static struct mem_cgroup_per_zone *
+mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
+{
+       return &mem->info.nodeinfo[nid]->zoneinfo[zid];
+}
+
+static struct mem_cgroup_per_zone *
+page_cgroup_zoneinfo(struct page_cgroup *pc)
+{
+       struct mem_cgroup *mem = pc->mem_cgroup;
+       int nid = page_cgroup_nid(pc);
+       int zid = page_cgroup_zid(pc);
+
+       if (!mem)
+               return NULL;
+
+       return mem_cgroup_zoneinfo(mem, nid, zid);
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct rb_node **p = &mctz->rb_root.rb_node;
+       struct rb_node *parent = NULL;
+       struct mem_cgroup_per_zone *mz_node;
+
+       if (mz->on_tree)
+               return;
+
+       mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+       spin_lock(&mctz->lock);
+       while (*p) {
+               parent = *p;
+               mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+                                       tree_node);
+               if (mz->usage_in_excess < mz_node->usage_in_excess)
+                       p = &(*p)->rb_left;
+               /*
+                * We can't avoid mem cgroups that are over their soft
+                * limit by the same amount
+                */
+               else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&mz->tree_node, parent, p);
+       rb_insert_color(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = true;
+       spin_unlock(&mctz->lock);
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       spin_lock(&mctz->lock);
+       rb_erase(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = false;
+       spin_unlock(&mctz->lock);
+}
+
+static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
+{
+       bool ret = false;
+       int cpu;
+       s64 val;
+       struct mem_cgroup_stat_cpu *cpustat;
+
+       cpu = get_cpu();
+       cpustat = &mem->stat.cpustat[cpu];
+       val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
+       if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
+               __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
+               ret = true;
+       }
+       put_cpu();
+       return ret;
+}
+
+static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
+{
+       unsigned long long prev_usage_in_excess, new_usage_in_excess;
+       bool updated_tree = false;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+
+       mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page));
+       mctz = soft_limit_tree_from_page(page);
+
+       /*
+        * We do updates in lazy mode, mem's are removed
+        * lazily from the per-zone, per-node rb tree
+        */
+       prev_usage_in_excess = mz->usage_in_excess;
+
+       new_usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+       if (prev_usage_in_excess) {
+               mem_cgroup_remove_exceeded(mem, mz, mctz);
+               updated_tree = true;
+       }
+       if (!new_usage_in_excess)
+               goto done;
+       mem_cgroup_insert_exceeded(mem, mz, mctz);
+
+done:
+       if (updated_tree) {
+               spin_lock(&mctz->lock);
+               mz->usage_in_excess = new_usage_in_excess;
+               spin_unlock(&mctz->lock);
+       }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
+{
+       int node, zone;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+
+       for_each_node_state(node, N_POSSIBLE) {
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       mz = mem_cgroup_zoneinfo(mem, node, zone);
+                       mctz = soft_limit_tree_node_zone(node, zone);
+                       mem_cgroup_remove_exceeded(mem, mz, mctz);
+               }
+       }
+}
+
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         struct page_cgroup *pc,
                                         bool charge)
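Note the throttling above: mem_cgroup_update_tree() is only reached when mem_cgroup_soft_limit_check() returns true, which happens once the per-cpu MEM_CGROUP_STAT_EVENTS counter (incremented on every charge and uncharge, see the next hunk) crosses SOFTLIMIT_EVENTS_THRESH and is then reset, so tree maintenance runs at most once per roughly 1000 events per cpu. A stand-alone model of that rate limiting (plain user-space C that mirrors the kernel names but uses none of its APIs):

/* Stand-alone model of the event-threshold throttling used above. */
#include <stdbool.h>
#include <stdio.h>

#define SOFTLIMIT_EVENTS_THRESH 1000

static long events;     /* models the per-cpu MEM_CGROUP_STAT_EVENTS counter */

static bool soft_limit_check(void)
{
        if (events > SOFTLIMIT_EVENTS_THRESH) {
                events = 0;     /* like __mem_cgroup_stat_reset_safe() */
                return true;    /* caller may now touch the RB-tree */
        }
        return false;
}

int main(void)
{
        long updates = 0;
        long i;

        for (i = 0; i < 10000; i++) {   /* 10000 charge/uncharge events */
                events++;               /* MEM_CGROUP_STAT_EVENTS += 1 */
                if (soft_limit_check())
                        updates++;      /* would call mem_cgroup_update_tree() */
        }
        printf("%ld tree updates for 10000 events\n", updates); /* prints 9 */
        return 0;
}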
@@ -236,28 +421,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
        else
                __mem_cgroup_stat_add_safe(cpustat,
                                MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1);
        put_cpu();
 }
 
-static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
-{
-       return &mem->info.nodeinfo[nid]->zoneinfo[zid];
-}
-
-static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
-{
-       struct mem_cgroup *mem = pc->mem_cgroup;
-       int nid = page_cgroup_nid(pc);
-       int zid = page_cgroup_zid(pc);
-
-       if (!mem)
-               return NULL;
-
-       return mem_cgroup_zoneinfo(mem, nid, zid);
-}
-
 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
 {
@@ -972,11 +1139,11 @@ done:
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        gfp_t gfp_mask, struct mem_cgroup **memcg,
-                       bool oom)
+                       bool oom, struct page *page)
 {
-       struct mem_cgroup *mem, *mem_over_limit;
+       struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-       struct res_counter *fail_res;
+       struct res_counter *fail_res, *soft_fail_res = NULL;
 
        if (unlikely(test_thread_flag(TIF_MEMDIE))) {
                /* Don't account this! */
@@ -1006,16 +1173,17 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                int ret;
                bool noswap = false;
 
-               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
+                                               &soft_fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
                                break;
                        ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
-                                                       &fail_res);
+                                                       &fail_res, NULL);
                        if (likely(!ret))
                                break;
                        /* mem+swap counter fails */
-                       res_counter_uncharge(&mem->res, PAGE_SIZE);
+                       res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
                        noswap = true;
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        memsw);
@@ -1053,13 +1221,23 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        goto nomem;
                }
        }
+       /*
+        * Insert just the ancestor, we should trickle down to the correct
+        * cgroup for reclaim, since the other nodes will be below their
+        * soft limit
+        */
+       if (soft_fail_res) {
+               mem_over_soft_limit =
+                       mem_cgroup_from_res_counter(soft_fail_res, res);
+               if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
+                       mem_cgroup_update_tree(mem_over_soft_limit, page);
+       }
        return 0;
 nomem:
        css_put(&mem->css);
        return -ENOMEM;
 }
 
 
 /*
  * A helper function to get mem_cgroup from ID. must be called under
  * rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -1126,9 +1304,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
-               res_counter_uncharge(&mem->res, PAGE_SIZE);
+               res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
                if (do_swap_account)
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
                css_put(&mem->css);
                return;
        }
@@ -1205,7 +1383,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        if (pc->mem_cgroup != from)
                goto out;
 
-       res_counter_uncharge(&from->res, PAGE_SIZE);
+       res_counter_uncharge(&from->res, PAGE_SIZE, NULL);
        mem_cgroup_charge_statistics(from, pc, false);
 
        page = pc->page;
@@ -1225,7 +1403,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        }
 
        if (do_swap_account)
-               res_counter_uncharge(&from->memsw, PAGE_SIZE);
+               res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL);
        css_put(&from->css);
 
        css_get(&to->css);
@@ -1265,7 +1443,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
        parent = mem_cgroup_from_cont(pcg);
 
 
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
        if (ret || !parent)
                return ret;
 
@@ -1295,9 +1473,9 @@ uncharge:
        /* drop extra refcnt by try_charge() */
        css_put(&parent->css);
        /* uncharge if move fails */
-       res_counter_uncharge(&parent->res, PAGE_SIZE);
+       res_counter_uncharge(&parent->res, PAGE_SIZE, NULL);
        if (do_swap_account)
-               res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+               res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL);
        return ret;
 }
 
@@ -1322,7 +1500,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        prefetchw(pc);
 
        mem = memcg;
-       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
        if (ret || !mem)
                return ret;
 
@@ -1441,14 +1619,14 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
        if (!mem)
                goto charge_cur_mm;
        *ptr = mem;
-       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
        /* drop extra refcnt from tryget */
        css_put(&mem->css);
        return ret;
 charge_cur_mm:
        if (unlikely(!mm))
                mm = &init_mm;
-       return __mem_cgroup_try_charge(mm, mask, ptr, true);
+       return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
 }
 
 static void
@@ -1486,7 +1664,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                         * This recorded memcg can be obsolete one. So, avoid
                         * calling css_tryget
                         */
-                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
                        mem_cgroup_put(memcg);
                }
                rcu_read_unlock();
@@ -1511,9 +1689,9 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
                return;
        if (!mem)
                return;
-       res_counter_uncharge(&mem->res, PAGE_SIZE);
+       res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
        if (do_swap_account)
-               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+               res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
        css_put(&mem->css);
 }
 
@@ -1527,6 +1705,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
        struct mem_cgroup_per_zone *mz;
+       bool soft_limit_excess = false;
 
        if (mem_cgroup_disabled())
                return NULL;
@@ -1565,9 +1744,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
                break;
        }
 
-       res_counter_uncharge(&mem->res, PAGE_SIZE);
+       res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess);
        if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
-               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+               res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
        mem_cgroup_charge_statistics(mem, pc, false);
 
        ClearPageCgroupUsed(pc);
@@ -1581,6 +1760,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
 
+       if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
+               mem_cgroup_update_tree(mem, page);
        /* at swapout, this memcg will be accessed to record to swap */
        if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                css_put(&mem->css);
@@ -1656,7 +1837,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
                 * We uncharge this because swap is freed.
                 * This memcg can be obsolete one. We avoid calling css_tryget
                 */
-               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+               res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
                mem_cgroup_put(memcg);
        }
        rcu_read_unlock();
@@ -1685,7 +1866,8 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        unlock_page_cgroup(pc);
 
        if (mem) {
-               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+                                               page);
                css_put(&mem->css);
        }
        *ptr = mem;
@@ -2194,6 +2376,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
                res_counter_reset_failcnt(&mem->memsw);
                break;
        }
+
        return 0;
 }
 
@@ -2489,6 +2672,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
                mz = &pn->zoneinfo[zone];
                for_each_lru(l)
                        INIT_LIST_HEAD(&mz->lists[l]);
+               mz->usage_in_excess = 0;
        }
        return 0;
 }
@@ -2534,6 +2718,7 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
 {
        int node;
 
+       mem_cgroup_remove_from_trees(mem);
        free_css_id(&mem_cgroup_subsys, &mem->css);
 
        for_each_node_state(node, N_POSSIBLE)
@@ -2582,6 +2767,31 @@ static void __init enable_swap_cgroup(void)
 }
 #endif
 
+static int mem_cgroup_soft_limit_tree_init(void)
+{
+       struct mem_cgroup_tree_per_node *rtpn;
+       struct mem_cgroup_tree_per_zone *rtpz;
+       int tmp, node, zone;
+
+       for_each_node_state(node, N_POSSIBLE) {
+               tmp = node;
+               if (!node_state(node, N_NORMAL_MEMORY))
+                       tmp = -1;
+               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+               if (!rtpn)
+                       return 1;
+
+               soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       rtpz = &rtpn->rb_tree_per_zone[zone];
+                       rtpz->rb_root = RB_ROOT;
+                       spin_lock_init(&rtpz->lock);
+               }
+       }
+       return 0;
+}
+
 static struct cgroup_subsys_state * __ref
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -2596,11 +2806,15 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
+
        /* root ? */
        if (cont->parent == NULL) {
                enable_swap_cgroup();
                parent = NULL;
                root_mem_cgroup = mem;
+               if (mem_cgroup_soft_limit_tree_init())
+                       goto free_out;
+
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;