mm/sl[aou]b: Move kmem_cache allocations into common code
Shift the allocations to common code, so that the allocation and freeing of the kmem_cache structures is handled by common code.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
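In outline, the convention changes like this (a minimal illustrative sketch, not code from this commit; the exact error handling is in the mm/slab_common.c hunk below, and `s`/`err` are the local names used there):

	/* Before: each allocator allocated and returned the cache, or NULL. */
	s = __kmem_cache_create(name, size, align, flags, ctor);

	/*
	 * After: common code allocates the zeroed structure up front; the
	 * allocator-specific __kmem_cache_create() only initializes it and
	 * returns 0 or a negative errno, and common code frees on failure.
	 */
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		err = __kmem_cache_create(s, name, size, align, flags, ctor);
		if (err)
			kmem_cache_free(kmem_cache, s);
	}

With the structure passed in, SLAB, SLOB and SLUB no longer need their own allocation and error-unwind paths, and SLUB's kmem_cache_open() can drop its memset() because the structure now arrives zeroed.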
commit 278b1bb131
parent 96d17b7be0
mm/slab.c (34 lines changed)
@@ -1676,7 +1676,8 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
+	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
 					sizes[INDEX_AC].cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1684,8 +1685,8 @@ void __init kmem_cache_init(void)
 
 	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep =
-			__kmem_cache_create(names[INDEX_L3].name,
+		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
 				sizes[INDEX_L3].cs_size,
 				ARCH_KMALLOC_MINALIGN,
 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1704,7 +1705,8 @@ void __init kmem_cache_init(void)
 	 * allow tighter packing of the smaller caches.
 	 */
 	if (!sizes->cs_cachep) {
-		sizes->cs_cachep = __kmem_cache_create(names->name,
+		sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		__kmem_cache_create(sizes->cs_cachep, names->name,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1712,7 +1714,8 @@ void __init kmem_cache_init(void)
 		list_add(&sizes->cs_cachep->list, &slab_caches);
 	}
 #ifdef CONFIG_ZONE_DMA
-	sizes->cs_dmacachep = __kmem_cache_create(
+	sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	__kmem_cache_create(sizes->cs_dmacachep,
 					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -2356,13 +2359,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
+int
+__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *))
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
+	int err;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2450,11 +2453,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	else
 		gfp = GFP_NOWAIT;
 
-	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(kmem_cache, gfp);
-	if (!cachep)
-		return NULL;
-
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
 	cachep->object_size = size;
 	cachep->align = align;
@@ -2509,8 +2507,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep->num) {
 		printk(KERN_ERR
 		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(kmem_cache, cachep);
-		return NULL;
+		return -E2BIG;
 	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
 			  + sizeof(struct slab), align);
@@ -2567,9 +2564,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->name = name;
 	cachep->refcount = 1;
 
-	if (setup_cpu_cache(cachep, gfp)) {
+	err = setup_cpu_cache(cachep, gfp);
+	if (err) {
 		__kmem_cache_shutdown(cachep);
-		return NULL;
+		return err;
 	}
 
 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2582,7 +2580,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
-	return cachep;
+	return 0;
 }
 
 #if DEBUG
mm/slab.h

@@ -33,8 +33,8 @@ extern struct list_head slab_caches;
 extern struct kmem_cache *kmem_cache;
 
 /* Functions provided by the slab allocators */
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *));
+extern int __kmem_cache_create(struct kmem_cache *, const char *name,
+	size_t size, size_t align, unsigned long flags, void (*ctor)(void *));
 
 #ifdef CONFIG_SLUB
 struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
mm/slab_common.c

@@ -119,19 +119,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	if (s)
 		goto out_locked;
 
-	s = __kmem_cache_create(n, size, align, flags, ctor);
+	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 
 	if (s) {
-		/*
-		 * Check if the slab has actually been created and if it was a
-		 * real instatiation. Aliases do not belong on the list
-		 */
-		if (s->refcount == 1)
+		err = __kmem_cache_create(s, n, size, align, flags, ctor);
+		if (!err)
 			list_add(&s->list, &slab_caches);
+		else {
+			kfree(n);
+			kmem_cache_free(kmem_cache, s);
+		}
 
 	} else {
 		kfree(n);
-		err = -ENOSYS; /* Until __kmem_cache_create returns code */
+		err = -ENOMEM;
 	}
 
 out_locked:
mm/slob.c (12 lines changed)
@@ -508,15 +508,9 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *c;
-
-	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
-
-	if (c) {
 		c->name = name;
 		c->size = size;
 		if (flags & SLAB_DESTROY_BY_RCU) {
@@ -532,10 +526,8 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		if (c->align < align)
 			c->align = align;
 
-		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
 		c->refcount = 1;
-	}
-	return c;
+	return 0;
 }
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
mm/slub.c (24 lines changed)
@@ -3034,7 +3034,6 @@ static int kmem_cache_open(struct kmem_cache *s,
 		size_t align, unsigned long flags,
 		void (*ctor)(void *))
 {
-	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
 	s->object_size = size;
@@ -3109,7 +3108,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 		goto error;
 
 	if (alloc_kmem_cache_cpus(s))
-		return 1;
+		return 0;
 
 	free_kmem_cache_nodes(s);
 error:
@@ -3118,7 +3117,7 @@ error:
 			"order=%u offset=%u flags=%lx\n",
 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
 			s->offset, flags);
-	return 0;
+	return -EINVAL;
 }
 
 /*
@@ -3260,13 +3259,13 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 {
 	struct kmem_cache *s;
 
-	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
+	if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
 		goto panic;
 
@@ -3944,20 +3943,11 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	return s;
 }
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+int __kmem_cache_create(struct kmem_cache *s,
+		const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *s;
-
-	s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
-	if (s) {
-		if (kmem_cache_open(s, name,
-				size, align, flags, ctor)) {
-			return s;
-		}
-		kmem_cache_free(kmem_cache, s);
-	}
-	return NULL;
+	return kmem_cache_open(s, name, size, align, flags, ctor);
 }
 
 #ifdef CONFIG_SMP