forked from Minki/linux
memcg, slab: fix barrier usage when accessing memcg_caches
Each root kmem_cache has pointers to per-memcg caches stored in its memcg_params::memcg_caches array. Whenever we want to allocate a slab for a memcg, we access this array to get per-memcg cache to allocate from (see memcg_kmem_get_cache()). The access must be lock-free for performance reasons, so we should use barriers to assert the kmem_cache is up-to-date. First, we should place a write barrier immediately before setting the pointer to it in the memcg_caches array in order to make sure nobody will see a partially initialized object. Second, we should issue a read barrier before dereferencing the pointer to conform to the write barrier. However, currently the barrier usage looks rather strange. We have a write barrier *after* setting the pointer and a read barrier *before* reading the pointer, which is incorrect. This patch fixes this. Signed-off-by: Vladimir Davydov <vdavydov@parallels.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: Glauber Costa <glommer@gmail.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Balbir Singh <bsingharora@gmail.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: Christoph Lameter <cl@linux.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
1aa1325425
commit
959c8963fc
@@ -3274,12 +3274,14 @@ void memcg_register_cache(struct kmem_cache *s)
|
|||||||
list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
|
list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
|
||||||
mutex_unlock(&memcg->slab_caches_mutex);
|
mutex_unlock(&memcg->slab_caches_mutex);
|
||||||
|
|
||||||
root->memcg_params->memcg_caches[id] = s;
|
|
||||||
/*
|
/*
|
||||||
* the readers won't lock, make sure everybody sees the updated value,
|
* Since readers won't lock (see cache_from_memcg_idx()), we need a
|
||||||
* so they won't put stuff in the queue again for no reason
|
* barrier here to ensure nobody will see the kmem_cache partially
|
||||||
|
* initialized.
|
||||||
*/
|
*/
|
||||||
wmb();
|
smp_wmb();
|
||||||
|
|
||||||
|
root->memcg_params->memcg_caches[id] = s;
|
||||||
}
|
}
|
||||||
|
|
||||||
void memcg_unregister_cache(struct kmem_cache *s)
|
void memcg_unregister_cache(struct kmem_cache *s)
|
||||||
@@ -3605,7 +3607,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
|
|||||||
gfp_t gfp)
|
gfp_t gfp)
|
||||||
{
|
{
|
||||||
struct mem_cgroup *memcg;
|
struct mem_cgroup *memcg;
|
||||||
int idx;
|
struct kmem_cache *memcg_cachep;
|
||||||
|
|
||||||
VM_BUG_ON(!cachep->memcg_params);
|
VM_BUG_ON(!cachep->memcg_params);
|
||||||
VM_BUG_ON(!cachep->memcg_params->is_root_cache);
|
VM_BUG_ON(!cachep->memcg_params->is_root_cache);
|
||||||
@@ -3619,15 +3621,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
|
|||||||
if (!memcg_can_account_kmem(memcg))
|
if (!memcg_can_account_kmem(memcg))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
idx = memcg_cache_id(memcg);
|
memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
|
||||||
|
if (likely(memcg_cachep)) {
|
||||||
/*
|
cachep = memcg_cachep;
|
||||||
* barrier to mare sure we're always seeing the up to date value. The
|
|
||||||
* code updating memcg_caches will issue a write barrier to match this.
|
|
||||||
*/
|
|
||||||
read_barrier_depends();
|
|
||||||
if (likely(cache_from_memcg_idx(cachep, idx))) {
|
|
||||||
cachep = cache_from_memcg_idx(cachep, idx);
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
12
mm/slab.h
12
mm/slab.h
@@ -163,9 +163,19 @@ static inline const char *cache_name(struct kmem_cache *s)
|
|||||||
static inline struct kmem_cache *
|
static inline struct kmem_cache *
|
||||||
cache_from_memcg_idx(struct kmem_cache *s, int idx)
|
cache_from_memcg_idx(struct kmem_cache *s, int idx)
|
||||||
{
|
{
|
||||||
|
struct kmem_cache *cachep;
|
||||||
|
|
||||||
if (!s->memcg_params)
|
if (!s->memcg_params)
|
||||||
return NULL;
|
return NULL;
|
||||||
return s->memcg_params->memcg_caches[idx];
|
cachep = s->memcg_params->memcg_caches[idx];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure we will access the up-to-date value. The code updating
|
||||||
|
* memcg_caches issues a write barrier to match this (see
|
||||||
|
* memcg_register_cache()).
|
||||||
|
*/
|
||||||
|
smp_read_barrier_depends();
|
||||||
|
return cachep;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
|
static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
|
||||||
|
Loading…
Reference in New Issue
Block a user