mm: remove CONFIG_MEMCG_KMEM
CONFIG_MEMCG_KMEM used to be a user-visible option for whether slab
tracking is enabled. It has been default-enabled and equivalent to
CONFIG_MEMCG for almost a decade. We've only grown more kernel memory
accounting sites since, and there is no imaginable cgroup usecase going
forward that wants to track user pages but not the multitude of
user-drivable kernel allocations.

Link: https://lkml.kernel.org/r/20240701153148.452230-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3a3b7fec39
parent 6df13230b6
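For context on the accounting the message refers to (this note and the sketch below are not part of the patch): under CONFIG_MEMCG, a kernel allocation is charged to the allocating task's cgroup when the call site passes __GFP_ACCOUNT (usually via GFP_KERNEL_ACCOUNT) or when the slab cache was created with SLAB_ACCOUNT; that is the machinery the hunks below re-gate from CONFIG_MEMCG_KMEM to CONFIG_MEMCG. A minimal sketch of the two opt-in forms; struct foo, foo_cache and foo_init are illustrative names only:

#include <linux/slab.h>
#include <linux/init.h>

struct foo {
	int val;
};

static struct kmem_cache *foo_cache;

/* Per-call opt-in: GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT. */
static void *foo_alloc_buffer(size_t size)
{
	return kmalloc(size, GFP_KERNEL_ACCOUNT);
}

/* Per-cache opt-in: every object from this cache is charged to the memcg. */
static int __init foo_init(void)
{
	foo_cache = KMEM_CACHE(foo, SLAB_ACCOUNT);
	return foo_cache ? 0 : -ENOMEM;
}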
include/linux/bpf.h

@@ -275,7 +275,7 @@ struct bpf_map {
 	u32 btf_value_type_id;
 	u32 btf_vmlinux_value_type_id;
 	struct btf *btf;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	struct obj_cgroup *objcg;
 #endif
 	char name[BPF_OBJ_NAME_LEN];
@@ -2252,7 +2252,7 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
 
 int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
 			unsigned long nr_pages, struct page **page_array);
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 			   int node);
 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
include/linux/list_lru.h

@@ -50,7 +50,7 @@ struct list_lru_node {
 
 struct list_lru {
 	struct list_lru_node *node;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	struct list_head list;
 	int shrinker_id;
 	bool memcg_aware;
include/linux/memcontrol.h

@@ -195,7 +195,7 @@ struct mem_cgroup {
 	/* Range enforcement for interrupt charges */
 	struct work_struct high_work;
 
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#ifdef CONFIG_ZSWAP
 	unsigned long zswap_max;
 
 	/*
@@ -236,7 +236,6 @@ struct mem_cgroup {
 	 */
 	unsigned long socket_pressure;
 
-#ifdef CONFIG_MEMCG_KMEM
 	int kmemcg_id;
 	/*
 	 * memcg->objcg is wiped out as a part of the objcg repaprenting
@@ -247,7 +246,6 @@ struct mem_cgroup {
 	struct obj_cgroup *orig_objcg;
 	/* list of inherited objcgs, protected by objcg_lock */
 	struct list_head objcg_list;
-#endif
 
 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
@@ -532,7 +530,6 @@ retry:
 	return memcg;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
 /*
  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
  * @folio: Pointer to the folio.
@@ -548,15 +545,6 @@ static inline bool folio_memcg_kmem(struct folio *folio)
 	return folio->memcg_data & MEMCG_DATA_KMEM;
 }
 
-
-#else
-static inline bool folio_memcg_kmem(struct folio *folio)
-{
-	return false;
-}
-
-#endif
-
 static inline bool PageMemcgKmem(struct page *page)
 {
 	return folio_memcg_kmem(page_folio(page));
@@ -1488,7 +1476,7 @@ static inline void split_page_memcg(struct page *head, int old_order, int new_order)
  * if MEMCG_DATA_OBJEXTS is set.
  */
 struct slabobj_ext {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	struct obj_cgroup *objcg;
 #endif
 #ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -1663,7 +1651,7 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg,
 }
 #endif
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 bool mem_cgroup_kmem_disabled(void);
 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
 void __memcg_kmem_uncharge_page(struct page *page, int order);
@@ -1806,9 +1794,9 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
 {
 }
 
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
include/linux/sched.h

@@ -1457,9 +1457,8 @@ struct task_struct {
 
 	/* Used by memcontrol for targeted memcg charge: */
 	struct mem_cgroup		*active_memcg;
-#endif
 
-#ifdef CONFIG_MEMCG_KMEM
+	/* Cache for current->cgroups->memcg->objcg lookups: */
 	struct obj_cgroup		*objcg;
 #endif
 
include/linux/slab.h

@@ -41,7 +41,7 @@ enum _slab_flag_bits {
 #ifdef CONFIG_FAILSLAB
 	_SLAB_FAILSLAB,
 #endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	_SLAB_ACCOUNT,
 #endif
 #ifdef CONFIG_KASAN_GENERIC
@@ -171,7 +171,7 @@ enum _slab_flag_bits {
 # define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
 #endif
 /* Account to memcg */
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 # define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
 #else
 # define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
@@ -407,7 +407,7 @@ enum kmalloc_cache_type {
 #ifndef CONFIG_ZONE_DMA
 	KMALLOC_DMA = KMALLOC_NORMAL,
 #endif
-#ifndef CONFIG_MEMCG_KMEM
+#ifndef CONFIG_MEMCG
 	KMALLOC_CGROUP = KMALLOC_NORMAL,
 #endif
 	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
@@ -420,7 +420,7 @@ enum kmalloc_cache_type {
 #ifdef CONFIG_ZONE_DMA
 	KMALLOC_DMA,
 #endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	KMALLOC_CGROUP,
 #endif
 	NR_KMALLOC_TYPES
@@ -435,7 +435,7 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
 #define KMALLOC_NOT_NORMAL_BITS				\
 	(__GFP_RECLAIMABLE |				\
 	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
-	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
+	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))
 
 extern unsigned long random_kmalloc_seed;
 
@@ -463,7 +463,7 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
 	 */
 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
 		return KMALLOC_DMA;
-	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
+	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
 		return KMALLOC_RECLAIM;
 	else
 		return KMALLOC_CGROUP;
include/trace/events/kmem.h

@@ -36,7 +36,7 @@ TRACE_EVENT(kmem_cache_alloc,
 		__entry->bytes_alloc	= s->size;
 		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 		__entry->node		= node;
-		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG) ?
 					  ((gfp_flags & __GFP_ACCOUNT) ||
 					  (s->flags & SLAB_ACCOUNT)) : false;
 	),
@@ -87,7 +87,7 @@ TRACE_EVENT(kmalloc,
 		__entry->bytes_alloc,
 		show_gfp_flags(__entry->gfp_flags),
 		__entry->node,
-		(IS_ENABLED(CONFIG_MEMCG_KMEM) &&
+		(IS_ENABLED(CONFIG_MEMCG) &&
 		(__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
 );
 
init/Kconfig

@@ -986,11 +986,6 @@ config MEMCG_V1
 
 	  Say N if unsure.
 
-config MEMCG_KMEM
-	bool
-	depends on MEMCG
-	default y
-
 config BLK_CGROUP
 	bool "IO controller"
 	depends on BLOCK
kernel/bpf/memalloc.c

@@ -155,12 +155,9 @@ static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
 
 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (c->objcg)
 		return get_mem_cgroup_from_objcg(c->objcg);
-#endif
-
-#ifdef CONFIG_MEMCG
 	return root_mem_cgroup;
 #else
 	return NULL;
@@ -534,7 +531,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 		size += LLIST_NODE_SZ; /* room for llist_node */
 	unit_size = size;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (memcg_bpf_enabled())
 		objcg = get_obj_cgroup_from_current();
 #endif
@@ -556,7 +553,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
 	if (!pcc)
 		return -ENOMEM;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	objcg = get_obj_cgroup_from_current();
 #endif
 	ma->objcg = objcg;
kernel/bpf/syscall.c

@@ -385,7 +385,7 @@ void bpf_map_free_id(struct bpf_map *map)
 	spin_unlock_irqrestore(&map_idr_lock, flags);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 static void bpf_map_save_memcg(struct bpf_map *map)
 {
 	/* Currently if a map is created by a process belonging to the root
@@ -486,7 +486,7 @@ int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
 	unsigned long i, j;
 	struct page *pg;
 	int ret = 0;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	struct mem_cgroup *memcg, *old_memcg;
 
 	memcg = bpf_map_get_memcg(map);
@@ -505,7 +505,7 @@ int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
 		break;
 	}
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
 #endif
mm/kfence/core.c

@@ -602,7 +602,7 @@ static unsigned long kfence_init_pool(void)
 			continue;
 
 		__folio_set_slab(slab_folio(slab));
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 		slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
 				 MEMCG_DATA_OBJEXTS;
 #endif
@@ -652,7 +652,7 @@ reset_slab:
 
 		if (!i || (i % 2))
 			continue;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 		slab->obj_exts = 0;
 #endif
 		__folio_clear_slab(slab_folio(slab));
@@ -1146,7 +1146,7 @@ void __kfence_free(void *addr)
 {
 	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	KFENCE_WARN_ON(meta->obj_exts.objcg);
 #endif
 	/*
mm/kfence/kfence.h

@@ -97,7 +97,7 @@ struct kfence_metadata {
 	struct kfence_track free_track;
 	/* For updating alloc_covered on frees. */
 	u32 alloc_stack_hash;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	struct slabobj_ext obj_exts;
 #endif
 };
mm/list_lru.c

@@ -15,7 +15,7 @@
 #include "slab.h"
 #include "internal.h"
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 static LIST_HEAD(memcg_list_lrus);
 static DEFINE_MUTEX(list_lrus_mutex);
 
@@ -83,7 +83,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 {
 	return &lru->node[nid].lru;
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
@@ -294,7 +294,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
 				      nr_to_walk);
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
 		struct list_lru_memcg *mlru;
 		unsigned long index;
@@ -324,7 +324,7 @@ static void init_one_lru(struct list_lru_one *l)
 	l->nr_items = 0;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
 {
 	int nid;
@@ -544,14 +544,14 @@ static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 static void memcg_destroy_list_lru(struct list_lru *lru)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
 int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 		    struct lock_class_key *key, struct shrinker *shrinker)
 {
 	int i;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (shrinker)
 		lru->shrinker_id = shrinker->id;
 	else
@@ -591,7 +591,7 @@ void list_lru_destroy(struct list_lru *lru)
 	kfree(lru->node);
 	lru->node = NULL;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	lru->shrinker_id = -1;
 #endif
 }
mm/memcontrol-v1.c

@@ -2756,7 +2756,7 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
 	return 0;
 }
 
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
+#ifdef CONFIG_SLUB_DEBUG
 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
 {
 	/*
@@ -2863,7 +2863,7 @@ struct cftype mem_cgroup_legacy_files[] = {
 		.write = mem_cgroup_reset,
 		.read_u64 = mem_cgroup_read_u64,
 	},
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
+#ifdef CONFIG_SLUB_DEBUG
 	{
 		.name = "kmem.slabinfo",
 		.seq_show = mem_cgroup_slab_show,
@@ -2922,7 +2922,6 @@ struct cftype memsw_files[] = {
 	{ },	/* terminate */
 };
 
-#ifdef CONFIG_MEMCG_KMEM
 void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
 {
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
@@ -2932,7 +2931,6 @@ void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
 			page_counter_uncharge(&memcg->kmem, -nr_pages);
 	}
 }
-#endif /* CONFIG_MEMCG_KMEM */
 
 bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
 			 gfp_t gfp_mask)
mm/memcontrol.c

@@ -118,7 +118,6 @@ struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 #define CURRENT_OBJCG_UPDATE_BIT 0
 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
 
-#ifdef CONFIG_MEMCG_KMEM
 static DEFINE_SPINLOCK(objcg_lock);
 
 bool mem_cgroup_kmem_disabled(void)
@@ -223,7 +222,6 @@ EXPORT_SYMBOL(memcg_kmem_online_key);
 
 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
 EXPORT_SYMBOL(memcg_bpf_enabled_key);
-#endif
 
 /**
  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
@@ -423,7 +421,7 @@ static const unsigned int memcg_vm_event_stat[] = {
 	PGDEACTIVATE,
 	PGLAZYFREE,
 	PGLAZYFREED,
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#ifdef CONFIG_ZSWAP
 	ZSWPIN,
 	ZSWPOUT,
 	ZSWPWB,
@@ -1346,7 +1344,7 @@ static const struct memory_stat memory_stats[] = {
 	{ "sock", MEMCG_SOCK },
 	{ "vmalloc", MEMCG_VMALLOC },
 	{ "shmem", NR_SHMEM },
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#ifdef CONFIG_ZSWAP
 	{ "zswap", MEMCG_ZSWAP_B },
 	{ "zswapped", MEMCG_ZSWAPPED },
 #endif
@@ -1700,13 +1698,11 @@ struct memcg_stock_pcp {
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
-#ifdef CONFIG_MEMCG_KMEM
 	struct obj_cgroup *cached_objcg;
 	struct pglist_data *cached_pgdat;
 	unsigned int nr_bytes;
 	int nr_slab_reclaimable_b;
 	int nr_slab_unreclaimable_b;
-#endif
 
 	struct work_struct work;
 	unsigned long flags;
@@ -1717,23 +1713,10 @@ static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
-#ifdef CONFIG_MEMCG_KMEM
 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 				     struct mem_cgroup *root_memcg);
 
-#else
-static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
-{
-	return NULL;
-}
-static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
-				     struct mem_cgroup *root_memcg)
-{
-	return false;
-}
-#endif
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2412,8 +2395,6 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 	local_irq_enable();
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-
 static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
 				       struct pglist_data *pgdat,
 				       enum node_stat_item idx, int nr)
@@ -3069,7 +3050,6 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 		obj_cgroup_put(objcg);
 	}
 }
-#endif /* CONFIG_MEMCG_KMEM */
 
 /*
  * Because folio_memcg(head) is not set on tails, set it now.
@@ -3116,7 +3096,6 @@ unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 	return val;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
 static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
 	struct obj_cgroup *objcg;
@@ -3167,15 +3146,6 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 	 */
 	memcg_reparent_list_lrus(memcg, parent);
 }
-#else
-static int memcg_online_kmem(struct mem_cgroup *memcg)
-{
-	return 0;
-}
-static void memcg_offline_kmem(struct mem_cgroup *memcg)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_CGROUP_WRITEBACK
 
@@ -3590,10 +3560,8 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 	vmpressure_init(&memcg->vmpressure);
 	memcg->socket_pressure = jiffies;
 	memcg1_memcg_init(memcg);
-#ifdef CONFIG_MEMCG_KMEM
 	memcg->kmemcg_id = -1;
 	INIT_LIST_HEAD(&memcg->objcg_list);
-#endif
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&memcg->cgwb_list);
 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
@@ -3627,7 +3595,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 
 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
 	memcg1_soft_limit_reset(memcg);
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#ifdef CONFIG_ZSWAP
 	memcg->zswap_max = PAGE_COUNTER_MAX;
 	WRITE_ONCE(memcg->zswap_writeback,
 		   !parent || READ_ONCE(parent->zswap_writeback));
@@ -3659,10 +3627,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
 		static_branch_inc(&memcg_sockets_enabled_key);
 
-#if defined(CONFIG_MEMCG_KMEM)
 	if (!cgroup_memory_nobpf)
 		static_branch_inc(&memcg_bpf_enabled_key);
-#endif
 
 	return &memcg->css;
 }
@@ -3755,10 +3721,8 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
 		static_branch_dec(&memcg_sockets_enabled_key);
 
-#if defined(CONFIG_MEMCG_KMEM)
 	if (!cgroup_memory_nobpf)
 		static_branch_dec(&memcg_bpf_enabled_key);
-#endif
 
 	vmpressure_cleanup(&memcg->vmpressure);
 	cancel_work_sync(&memcg->high_work);
@@ -3901,7 +3865,6 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 	atomic64_set(&memcg->vmstats->stats_updates, 0);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
 static void mem_cgroup_fork(struct task_struct *task)
 {
 	/*
@@ -3929,7 +3892,6 @@ static void mem_cgroup_exit(struct task_struct *task)
 	 */
 	task->objcg = NULL;
 }
-#endif
 
 #ifdef CONFIG_LRU_GEN
 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
@@ -3953,7 +3915,6 @@ static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
 #endif /* CONFIG_LRU_GEN */
 
-#ifdef CONFIG_MEMCG_KMEM
 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -3964,17 +3925,12 @@ static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
 	}
 }
-#else
-static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
-#endif /* CONFIG_MEMCG_KMEM */
 
-#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
 static void mem_cgroup_attach(struct cgroup_taskset *tset)
 {
 	mem_cgroup_lru_gen_attach(tset);
 	mem_cgroup_kmem_attach(tset);
 }
-#endif
 
 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
 {
@@ -4421,13 +4377,9 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.css_free = mem_cgroup_css_free,
 	.css_reset = mem_cgroup_css_reset,
 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
-#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
 	.attach = mem_cgroup_attach,
-#endif
-#ifdef CONFIG_MEMCG_KMEM
 	.fork = mem_cgroup_fork,
 	.exit = mem_cgroup_exit,
-#endif
 	.dfl_cftypes = memory_files,
 #ifdef CONFIG_MEMCG_V1
 	.can_attach = memcg1_can_attach,
@@ -5395,7 +5347,7 @@ static struct cftype swap_files[] = {
 	{ }	/* terminate */
 };
 
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#ifdef CONFIG_ZSWAP
 /**
  * obj_cgroup_may_zswap - check if this cgroup can zswap
  * @objcg: the object cgroup
@@ -5577,7 +5529,7 @@ static struct cftype zswap_files[] = {
 	},
 	{ }	/* terminate */
 };
-#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
+#endif /* CONFIG_ZSWAP */
 
 static int __init mem_cgroup_swap_init(void)
 {
@@ -5588,7 +5540,7 @@ static int __init mem_cgroup_swap_init(void)
 #ifdef CONFIG_MEMCG_V1
 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
 #endif
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#ifdef CONFIG_ZSWAP
 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
 #endif
 	return 0;
mm/percpu-internal.h

@@ -33,7 +33,7 @@ struct pcpu_block_md {
 };
 
 struct pcpuobj_ext {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	struct obj_cgroup	*cgroup;
 #endif
 #ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -41,7 +41,7 @@ struct pcpuobj_ext {
 #endif
 };
 
-#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MEM_ALLOC_PROFILING)
+#if defined(CONFIG_MEMCG) || defined(CONFIG_MEM_ALLOC_PROFILING)
 #define NEED_PCPUOBJ_EXT
 #endif
 
@@ -154,7 +154,7 @@ static inline size_t pcpu_obj_full_size(size_t size)
 {
 	size_t extra_size = 0;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (!mem_cgroup_kmem_disabled())
 		extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
 #endif
mm/percpu.c

@@ -1619,7 +1619,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
 				      struct obj_cgroup **objcgp)
 {
@@ -1681,7 +1681,7 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
 	obj_cgroup_put(objcg);
 }
 
-#else /* CONFIG_MEMCG_KMEM */
+#else /* CONFIG_MEMCG */
 static bool
 pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
 {
@@ -1697,7 +1697,7 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING
 static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
mm/slab.h

@@ -573,7 +573,7 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 				  gfp_t flags, size_t size, void **p);
 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
mm/slab_common.c

@@ -725,7 +725,7 @@ EXPORT_SYMBOL(kmalloc_size_roundup);
 #define KMALLOC_DMA_NAME(sz)
 #endif
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 #define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
 #else
 #define KMALLOC_CGROUP_NAME(sz)
@@ -867,7 +867,7 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
 
 	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
 		flags |= SLAB_RECLAIM_ACCOUNT;
-	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
+	} else if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_CGROUP)) {
 		if (mem_cgroup_kmem_disabled()) {
 			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
 			return;
@@ -883,10 +883,10 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
 #endif
 
 	/*
-	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
+	 * If CONFIG_MEMCG is enabled, disable cache merging for
 	 * KMALLOC_NORMAL caches.
 	 */
-	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
+	if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_NORMAL))
 		flags |= SLAB_NO_MERGE;
 
 	if (minalign > ARCH_KMALLOC_MINALIGN) {
@@ -913,7 +913,7 @@ void __init create_kmalloc_caches(void)
 	enum kmalloc_cache_type type;
 
 	/*
-	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
+	 * Including KMALLOC_CGROUP if CONFIG_MEMCG defined
 	 */
 	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
 		/* Caches that are NOT of the two-to-the-power-of size. */
mm/slub.c (10 changed lines)

@@ -2020,7 +2020,7 @@ static inline bool need_slab_obj_ext(void)
 		return true;
 
 	/*
-	 * CONFIG_MEMCG_KMEM creates vector of obj_cgroup objects conditionally
+	 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
 	 * inside memcg_slab_post_alloc_hook. No other users for now.
 	 */
 	return false;
@@ -2104,7 +2104,7 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 
 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
 
@@ -2146,7 +2146,7 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 	__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
 }
-#else /* CONFIG_MEMCG_KMEM */
+#else /* CONFIG_MEMCG */
 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
 					      struct list_lru *lru,
 					      gfp_t flags, size_t size,
@@ -2159,7 +2159,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 					 void **p, int objects)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
@@ -4456,7 +4456,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 	do_slab_free(s, slab, object, object, 1, addr);
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 /* Do not inline the rare memcg charging failed path into the allocation path */
 static noinline
 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
tools/testing/selftests/cgroup/config

@@ -3,5 +3,4 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_KMEM=y
 CONFIG_PAGE_COUNTER=y
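A closing observation (not part of the patch): out-of-tree modules and stable backports that guarded obj_cgroup fields or accounting hooks behind the removed symbol need only the same mechanical substitution applied tree-wide above, since CONFIG_MEMCG_KMEM was already hard-wired on whenever CONFIG_MEMCG was set:

/* before: compiled out only when memcg itself was off */
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *objcg;
#endif

/* after: gate directly on CONFIG_MEMCG */
#ifdef CONFIG_MEMCG
	struct obj_cgroup *objcg;
#endif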