mm: list_lru: fix UAF for memory cgroup
mem_cgroup_from_slab_obj() is supposed to be called under the RCU read lock, cgroup_mutex, or some other lock that prevents the returned memcg from being freed. list_lru_add_obj() and list_lru_del_obj() call it without any such protection; fix them by adding the missing RCU read lock.
Found by code inspection.
[songmuchun@bytedance.com: only grab rcu lock when necessary, per Vlastimil]
Link: https://lkml.kernel.org/r/20240801024603.1865-1-songmuchun@bytedance.com
Link: https://lkml.kernel.org/r/20240718083607.42068-1-songmuchun@bytedance.com
Fixes: 0a97c01cd2 ("list_lru: allow explicit memcg and NUMA node selection")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 5161b48712
parent 7d4df2dad3
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 }
 #endif /* CONFIG_MEMCG */
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_add(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_add(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_add_obj);
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
 
 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_del(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_del(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_del_obj);
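For context, a minimal sketch (not part of the commit) of the rule that the new "The caller must ensure the memcg lifetime" comments document: a caller that passes a memcg obtained from mem_cgroup_from_slab_obj() to list_lru_add()/list_lru_del() must hold the RCU read lock (or another guarantee such as cgroup_mutex) across both the lookup and the list_lru call, which is the same pattern the fixed list_lru_add_obj()/list_lru_del_obj() now apply internally. The helper name below is hypothetical.

#include <linux/list_lru.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>

/* Hypothetical caller, for illustration only. */
static bool example_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	bool ret;

	/*
	 * The memcg returned by mem_cgroup_from_slab_obj() is only
	 * guaranteed to stay alive while the RCU read lock is held,
	 * so the lock must cover both the lookup and the call that
	 * dereferences it.
	 */
	rcu_read_lock();
	ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
	rcu_read_unlock();

	return ret;
}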