mm: memcg/slab: uncharge during kmem_cache_free_bulk()
Object cgroup charging is done for all the objects during allocation, but
during freeing, uncharging ends up happening for only one object in the
case of bulk allocation/freeing.
Fix this by having a separate call to uncharge all the objects from
kmem_cache_free_bulk() and by modifying memcg_slab_free_hook() to take
care of bulk uncharging.
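To illustrate the affected pattern, here is a minimal sketch of a bulk
alloc/free caller; "cachep" and the batch size are hypothetical, not
taken from the patch.  Every object obtained from the bulk allocator is
charged to its obj_cgroup, so freeing the batch must uncharge every
object rather than just one:

	/*
	 * Hypothetical caller, for illustration only.  Each object
	 * allocated below is charged at allocation time; before this
	 * fix, kmem_cache_free_bulk() uncharged only one of them.
	 */
	void *objs[16];

	if (kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
		/* ... use the objects ... */

		/* with this patch, all 16 objects are uncharged on free */
		kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
	}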
Fixes: 964d4bd370 ("mm: memcg/slab: save obj_cgroup for non-root slab objects")
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/20201009060423.390479-1-bharata@linux.ibm.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d1b2cf6cb8
parent 7a52d4d88a
mm/slab.c

@@ -3438,7 +3438,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	memset(objp, 0, cachep->object_size);
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
-	memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
+	memcg_slab_free_hook(cachep, &objp, 1);
 
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
mm/slab.h | 42
@@ -345,30 +345,42 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 	obj_cgroup_put(objcg);
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
-					void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+					void **p, int objects)
 {
+	struct kmem_cache *s;
 	struct obj_cgroup *objcg;
+	struct page *page;
 	unsigned int off;
+	int i;
 
 	if (!memcg_kmem_enabled())
 		return;
 
-	if (!page_has_obj_cgroups(page))
-		return;
+	for (i = 0; i < objects; i++) {
+		if (unlikely(!p[i]))
+			continue;
 
-	off = obj_to_index(s, page, p);
-	objcg = page_obj_cgroups(page)[off];
-	page_obj_cgroups(page)[off] = NULL;
+		page = virt_to_head_page(p[i]);
+		if (!page_has_obj_cgroups(page))
+			continue;
 
-	if (!objcg)
-		return;
+		if (!s_orig)
+			s = page->slab_cache;
+		else
+			s = s_orig;
 
-	obj_cgroup_uncharge(objcg, obj_full_size(s));
-	mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
-			-obj_full_size(s));
+		off = obj_to_index(s, page, p[i]);
+		objcg = page_obj_cgroups(page)[off];
+		if (!objcg)
+			continue;
 
-	obj_cgroup_put(objcg);
+		page_obj_cgroups(page)[off] = NULL;
+		obj_cgroup_uncharge(objcg, obj_full_size(s));
+		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
+				-obj_full_size(s));
+		obj_cgroup_put(objcg);
+	}
 }
 
 #else /* CONFIG_MEMCG_KMEM */
@@ -406,8 +418,8 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 {
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
-					void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s,
+					void **p, int objects)
 {
 }
 #endif /* CONFIG_MEMCG_KMEM */
mm/slub.c

@@ -3095,7 +3095,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	memcg_slab_free_hook(s, page, head);
+	memcg_slab_free_hook(s, &head, 1);
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3257,6 +3257,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 	if (WARN_ON(!size))
 		return;
 
+	memcg_slab_free_hook(s, p, size);
 	do {
 		struct detached_freelist df;
 