mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
slab: add SLAB_ACCOUNT flag
Currently, if we want to account all objects of a particular kmem cache, we have to pass __GFP_ACCOUNT to each kmem_cache_alloc call, which is inconvenient. This patch introduces SLAB_ACCOUNT flag which if passed to kmem_cache_create will force accounting for every allocation from this cache even if __GFP_ACCOUNT is not passed. This patch does not make any of the existing caches use this flag - it will be done later in the series. Note, a cache with SLAB_ACCOUNT cannot be merged with a cache w/o SLAB_ACCOUNT, because merged caches share the same kmem_cache struct and hence cannot have different sets of SLAB_* flags. Thus using this flag will probably reduce the number of merged slabs even if kmem accounting is not used (only compiled in). Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com> Suggested-by: Tejun Heo <tj@kernel.org> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Greg Thelen <gthelen@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a9bb7e620e
commit
230e9fc286
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -766,15 +766,13 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-static inline bool __memcg_kmem_bypass(gfp_t gfp)
+static inline bool __memcg_kmem_bypass(void)
 {
 	if (!memcg_kmem_enabled())
 		return true;
-	if (!(gfp & __GFP_ACCOUNT))
-		return true;
 	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
 		return true;
 	return false;
@@ -791,7 +789,9 @@ static inline bool __memcg_kmem_bypass(gfp_t gfp)
 static __always_inline int memcg_kmem_charge(struct page *page,
 					     gfp_t gfp, int order)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
+		return 0;
+	if (!(gfp & __GFP_ACCOUNT))
 		return 0;
 	return __memcg_kmem_charge(page, gfp, order);
 }
@@ -810,16 +810,15 @@ static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
 /**
  * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
  * @cachep: the original global kmem cache
- * @gfp: allocation flags.
  *
  * All memory allocated from a per-memcg cache is charged to the owner memcg.
  */
 static __always_inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
 		return cachep;
-	return __memcg_kmem_get_cache(cachep);
+	return __memcg_kmem_get_cache(cachep, gfp);
 }
 
 static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -86,6 +86,11 @@
 #else
 # define SLAB_FAILSLAB		0x00000000UL
 #endif
+#ifdef CONFIG_MEMCG_KMEM
+# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
+#else
+# define SLAB_ACCOUNT		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2356,7 +2356,7 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
  * Can't be called in interrupt context or from kernel threads.
  * This function needs to be called with rcu_read_lock() held.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
@@ -2364,6 +2364,12 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 
 	VM_BUG_ON(!is_root_cache(cachep));
 
+	if (cachep->flags & SLAB_ACCOUNT)
+		gfp |= __GFP_ACCOUNT;
+
+	if (!(gfp & __GFP_ACCOUNT))
+		return cachep;
+
 	if (current->memcg_kmem_skip_account)
 		return cachep;
 
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -128,10 +128,11 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 #if defined(CONFIG_SLAB)
 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
-			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
+			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
+			  SLAB_NOTRACK | SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-			  SLAB_TEMPORARY | SLAB_NOTRACK)
+			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
 #else
 #define SLAB_CACHE_FLAGS (0)
 #endif
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,7 +37,8 @@ struct kmem_cache *kmem_cache;
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
 		SLAB_FAILSLAB)
 
-#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
+			 SLAB_NOTRACK | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
Loading…
Reference in New Issue
Block a user