mm: list_lru: only add memcg-aware lrus to the global lru list
The non-memcg-aware lru is always skipped when traversing the global lru list, which is not efficient. Instead, add only the memcg-aware lrus to the global lru list, so the traversal never has to skip anything.

Link: https://lkml.kernel.org/r/20211025124353.55781-1-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 3eef11279b
parent e80216d9f1
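The idea of the change, reduced to a minimal userspace sketch (this is not the kernel code: struct toy_lru, toy_register(), toy_update_all() and global_lrus below are made-up stand-ins for struct list_lru, list_lru_register() and memcg_list_lrus): filter out non-memcg-aware lrus once at registration time, so every later walk of the global list only visits entries it actually has to process.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel structures; all names here are illustrative. */
struct toy_lru {
	const char *name;
	bool memcg_aware;
	struct toy_lru *next;		/* link on the global list */
};

static struct toy_lru *global_lrus;	/* plays the role of memcg_list_lrus */

/* After the patch, filtering happens once at registration time, so the
 * global list only ever contains memcg-aware lrus. */
static void toy_register(struct toy_lru *lru)
{
	if (!lru->memcg_aware)
		return;			/* never enters the global list */
	lru->next = global_lrus;
	global_lrus = lru;
}

/* Walkers (the kernel analogues are memcg_update_all_list_lrus() and
 * memcg_drain_all_list_lrus()) no longer need a per-entry check. */
static void toy_update_all(void)
{
	for (struct toy_lru *lru = global_lrus; lru; lru = lru->next)
		printf("updating %s\n", lru->name);
}

int main(void)
{
	struct toy_lru a = { .name = "memcg-aware lru", .memcg_aware = true };
	struct toy_lru b = { .name = "plain lru", .memcg_aware = false };

	toy_register(&a);
	toy_register(&b);	/* filtered out up front */
	toy_update_all();	/* visits only "memcg-aware lru" */
	return 0;
}

Before the patch, the equivalent of toy_register() linked every lru and each walker carried the "is this memcg aware?" test; moving the test to registration does the work once instead of on every traversal.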
mm/list_lru.c

@@ -15,18 +15,29 @@
 #include "slab.h"
 
 #ifdef CONFIG_MEMCG_KMEM
-static LIST_HEAD(list_lrus);
+static LIST_HEAD(memcg_list_lrus);
 static DEFINE_MUTEX(list_lrus_mutex);
 
+static inline bool list_lru_memcg_aware(struct list_lru *lru)
+{
+	return lru->memcg_aware;
+}
+
 static void list_lru_register(struct list_lru *lru)
 {
+	if (!list_lru_memcg_aware(lru))
+		return;
+
 	mutex_lock(&list_lrus_mutex);
-	list_add(&lru->list, &list_lrus);
+	list_add(&lru->list, &memcg_list_lrus);
 	mutex_unlock(&list_lrus_mutex);
 }
 
 static void list_lru_unregister(struct list_lru *lru)
 {
+	if (!list_lru_memcg_aware(lru))
+		return;
+
 	mutex_lock(&list_lrus_mutex);
 	list_del(&lru->list);
 	mutex_unlock(&list_lrus_mutex);
@@ -37,11 +48,6 @@ static int lru_shrinker_id(struct list_lru *lru)
 	return lru->shrinker_id;
 }
 
-static inline bool list_lru_memcg_aware(struct list_lru *lru)
-{
-	return lru->memcg_aware;
-}
-
 static inline struct list_lru_one *
 list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 {
@@ -457,9 +463,6 @@ static int memcg_update_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return 0;
-
 	for_each_node(i) {
 		if (memcg_update_list_lru_node(&lru->node[i],
 					       old_size, new_size))
@@ -482,9 +485,6 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return;
-
 	for_each_node(i)
 		memcg_cancel_update_list_lru_node(&lru->node[i],
 						  old_size, new_size);
@@ -497,7 +497,7 @@ int memcg_update_all_list_lrus(int new_size)
 	int old_size = memcg_nr_cache_ids;
 
 	mutex_lock(&list_lrus_mutex);
-	list_for_each_entry(lru, &list_lrus, list) {
+	list_for_each_entry(lru, &memcg_list_lrus, list) {
 		ret = memcg_update_list_lru(lru, old_size, new_size);
 		if (ret)
 			goto fail;
@@ -506,7 +506,7 @@ out:
 	mutex_unlock(&list_lrus_mutex);
 	return ret;
 fail:
-	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
+	list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
 		memcg_cancel_update_list_lru(lru, old_size, new_size);
 	goto out;
 }
@@ -543,9 +543,6 @@ static void memcg_drain_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return;
-
 	for_each_node(i)
 		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
 }
@@ -555,7 +552,7 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
 	struct list_lru *lru;
 
 	mutex_lock(&list_lrus_mutex);
-	list_for_each_entry(lru, &list_lrus, list)
+	list_for_each_entry(lru, &memcg_list_lrus, list)
 		memcg_drain_list_lru(lru, src_idx, dst_memcg);
 	mutex_unlock(&list_lrus_mutex);
 }