mm/sl[aou]b: Get rid of __kmem_cache_destroy
What __kmem_cache_destroy() does can be done in __kmem_cache_shutdown() instead. This affects RCU handling somewhat: on RCU free, none of the slab allocators refer to management structures other than the kmem_cache structure itself, so those other structures can be freed before the RCU-deferred free back to the page allocator occurs.

Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
commit 12c3667fb7
parent 8f4c765c22
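For context, here is a minimal sketch of the resulting common destroy path (reference counting, slab_mutex locking, CPU hotplug handling, and the failure path that re-adds the cache to slab_caches are all elided; see the mm/slab_common.c hunk below). The point of the ordering is that __kmem_cache_shutdown() may free the per-allocator management structures before the RCU grace period completes, because the deferred free back to the page allocator dereferences only the kmem_cache structure itself, which is freed last:

	/* Sketch only -- the real kmem_cache_destroy() in mm/slab_common.c
	 * also handles refcounting, slab_mutex and the error path. */
	void kmem_cache_destroy(struct kmem_cache *s)
	{
		if (!__kmem_cache_shutdown(s)) {
			/* Per-allocator management structures are already
			 * gone; RCU callbacks still in flight touch only *s,
			 * which is freed after the barrier. */
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kmem_cache_free(kmem_cache, s);
		}
	}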
mm/slab.c (46 changed lines)
@@ -2208,26 +2208,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-	    kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-}
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2364,9 +2344,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2591,7 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->refcount = 1;
 
 	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
+		__kmem_cache_shutdown(cachep);
 		return NULL;
 	}
 
@@ -2766,7 +2743,26 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __cache_shrink(cachep);
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
+
+	if (rc)
+		return rc;
+
+	for_each_online_cpu(i)
+	    kfree(cachep->array[i]);
+
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	return 0;
 }
 
 /*
mm/slab.h

@@ -37,6 +37,5 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 			size_t align, unsigned long flags, void (*ctor)(void *));
 
 int __kmem_cache_shutdown(struct kmem_cache *);
-void __kmem_cache_destroy(struct kmem_cache *);
 
 #endif
mm/slab_common.c

@@ -153,7 +153,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 
-		__kmem_cache_destroy(s);
 		kmem_cache_free(kmem_cache, s);
 	} else {
 		list_add(&s->list, &slab_caches);
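Nothing changes for users of the slab API: kmem_cache_destroy() keeps its contract, and the RCU barrier plus per-allocator teardown now both happen inside it. A hypothetical caller (names and the object layout are illustrative, not from this patch) still just pairs create with destroy:

	#include <linux/module.h>
	#include <linux/slab.h>

	struct my_obj {
		struct hlist_node node;
		int payload;
	};

	static struct kmem_cache *my_cache;	/* hypothetical example cache */

	static int __init my_init(void)
	{
		my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
					     0, SLAB_DESTROY_BY_RCU, NULL);
		return my_cache ? 0 : -ENOMEM;
	}

	static void __exit my_exit(void)
	{
		/* Waits for in-flight RCU frees and tears down the
		 * per-allocator state in one call. */
		kmem_cache_destroy(my_cache);
	}

	module_init(my_init);
	module_exit(my_exit);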
mm/slob.c

@@ -538,10 +538,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	return c;
 }
 
-void __kmem_cache_destroy(struct kmem_cache *c)
-{
-}
-
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
mm/slub.c (10 changed lines)
@@ -3205,12 +3205,12 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 
 int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	return kmem_cache_close(s);
-}
-
-void __kmem_cache_destroy(struct kmem_cache *s)
-{
-	sysfs_slab_remove(s);
+	int rc = kmem_cache_close(s);
+
+	if (!rc)
+		sysfs_slab_remove(s);
+
+	return rc;
 }
 
 /********************************************************************