mm, memcg: rework remote charging API to support nesting
Currently the remote memcg charging API consists of two functions:
memalloc_use_memcg() and memalloc_unuse_memcg(), which set and clear the
active memcg value, overwriting the memcg of the current task:

    memalloc_use_memcg(target_memcg);
    <...>
    memalloc_unuse_memcg();

This works perfectly for allocations performed from a normal context;
however, an attempt to call it from an interrupt context, or simply to
nest two remote charging blocks, leads to incorrect accounting.  On exit
from the inner block the active memcg is cleared instead of being
restored:

    memalloc_use_memcg(target_memcg);

    memalloc_use_memcg(target_memcg_2);
    <...>
    memalloc_unuse_memcg();

    Error: allocations here are charged to the memcg of the current
    process instead of target_memcg.

    memalloc_unuse_memcg();

This patch extends the remote charging API by switching to a single
function:

    struct mem_cgroup *set_active_memcg(struct mem_cgroup *memcg);

which sets the new value and returns the old one.  A remote charging
block then looks like:

    old_memcg = set_active_memcg(target_memcg);
    <...>
    set_active_memcg(old_memcg);

This patch is heavily based on the patch by Johannes Weiner, which can be
found here: https://lkml.org/lkml/2020/5/28/806

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Schatzberg <dschatzberg@fb.com>
Link: https://lkml.kernel.org/r/20200821212056.3769116-1-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
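For illustration, a minimal sketch of how nested remote charging would look
with the new API (hypothetical caller; target_memcg and target_memcg_2 stand
in for any valid mem_cgroup pointers, and CONFIG_MEMCG is assumed):

    struct mem_cgroup *outer_old, *inner_old;

    outer_old = set_active_memcg(target_memcg);
    /* __GFP_ACCOUNT allocations here are charged to target_memcg */

    inner_old = set_active_memcg(target_memcg_2);
    /* __GFP_ACCOUNT allocations here are charged to target_memcg_2 */
    set_active_memcg(inner_old);    /* restores target_memcg, not NULL */

    set_active_memcg(outer_old);    /* restores the original active memcg */

Because set_active_memcg() returns the previous value, the inner scope can
restore its caller's memcg instead of unconditionally clearing it.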
This commit is contained in:
parent 7404840d87
commit b87d8cefe4
@@ -842,13 +842,13 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 	struct buffer_head *bh, *head;
 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
 	long offset;
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg, *old_memcg;
 
 	if (retry)
 		gfp |= __GFP_NOFAIL;
 
 	memcg = get_mem_cgroup_from_page(page);
-	memalloc_use_memcg(memcg);
+	old_memcg = set_active_memcg(memcg);
 
 	head = NULL;
 	offset = PAGE_SIZE;
@@ -867,7 +867,7 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 		set_bh_page(bh, page, offset);
 	}
 out:
-	memalloc_unuse_memcg();
+	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
 	return head;
 /*
@@ -531,6 +531,7 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
 	struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir);
 	const struct path *path = fsnotify_data_path(data, data_type);
 	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+	struct mem_cgroup *old_memcg;
 	struct inode *child = NULL;
 	bool name_event = false;
@@ -580,7 +581,7 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
 		gfp |= __GFP_RETRY_MAYFAIL;
 
 	/* Whoever is interested in the event, pays for the allocation. */
-	memalloc_use_memcg(group->memcg);
+	old_memcg = set_active_memcg(group->memcg);
 
 	if (fanotify_is_perm_event(mask)) {
 		event = fanotify_alloc_perm_event(path, gfp);
@@ -608,7 +609,7 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
 	event->pid = get_pid(task_tgid(current));
 
 out:
-	memalloc_unuse_memcg();
+	set_active_memcg(old_memcg);
 	return event;
 }
 
@@ -66,6 +66,7 @@ static int inotify_one_event(struct fsnotify_group *group, u32 mask,
 	int ret;
 	int len = 0;
 	int alloc_len = sizeof(struct inotify_event_info);
+	struct mem_cgroup *old_memcg;
 
 	if ((inode_mark->mask & FS_EXCL_UNLINK) &&
 	    path && d_unlinked(path->dentry))
@@ -87,9 +88,9 @@ static int inotify_one_event(struct fsnotify_group *group, u32 mask,
 	 * trigger OOM killer in the target monitoring memcg as it may have
 	 * security repercussion.
 	 */
-	memalloc_use_memcg(group->memcg);
+	old_memcg = set_active_memcg(group->memcg);
 	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
-	memalloc_unuse_memcg();
+	set_active_memcg(old_memcg);
 
 	if (unlikely(!event)) {
 		/*
@@ -280,38 +280,28 @@ static inline void memalloc_nocma_restore(unsigned int flags)
 
 #ifdef CONFIG_MEMCG
 /**
- * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * set_active_memcg - Starts the remote memcg charging scope.
  * @memcg: memcg to charge.
  *
  * This function marks the beginning of the remote memcg charging scope. All the
  * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
  * given memcg.
  *
- * NOTE: This function is not nesting safe.
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
  */
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
-	WARN_ON_ONCE(current->active_memcg);
+	struct mem_cgroup *old = current->active_memcg;
 	current->active_memcg = memcg;
-}
-
-/**
- * memalloc_unuse_memcg - Ends the remote memcg charging scope.
- *
- * This function marks the end of the remote memcg charging scope started by
- * memalloc_use_memcg().
- */
-static inline void memalloc_unuse_memcg(void)
-{
-	current->active_memcg = NULL;
+	return old;
 }
 #else
-static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
-{
-}
-
-static inline void memalloc_unuse_memcg(void)
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
 {
+	return NULL;
 }
 #endif
 
@@ -5290,12 +5290,12 @@ static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg, *old_memcg;
 	long error = -ENOMEM;
 
-	memalloc_use_memcg(parent);
+	old_memcg = set_active_memcg(parent);
 	memcg = mem_cgroup_alloc();
-	memalloc_unuse_memcg();
+	set_active_memcg(old_memcg);
 	if (IS_ERR(memcg))
 		return ERR_CAST(memcg);
 