slub: remove kmalloc under list_lock from list_slab_objects() V2
list_slab_objects() is called when a slab is destroyed and there are
objects still left, in order to list those remaining objects in the
syslog. This is a pretty rare event.

There we take the list_lock and call kmalloc while holding that lock.
Perform the allocation in free_partial() before the list_lock is taken.
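
For illustration only, a minimal userspace sketch of the pattern this
patch applies: allocate the scratch buffer before taking the lock, hand
it to the reporting helper, and free it after the lock is released. It
uses pthreads instead of the kernel spinlock API, and the names
report_remaining() and shutdown_cache() are made up for the example;
this is not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller supplies the bitmap so no allocation happens under the lock. */
static void report_remaining(const char *name, unsigned long *map)
{
        if (!map)       /* allocation failed earlier: skip the report */
                return;
        /* The real code walks the slab and prints each live object; here
         * we only show that the caller-supplied buffer is available. */
        printf("Objects remaining in %s (scratch map %p)\n",
               name, (void *)map);
}

static void shutdown_cache(const char *name, size_t nbits)
{
        size_t nwords = (nbits + 8 * sizeof(unsigned long) - 1) /
                        (8 * sizeof(unsigned long));
        unsigned long *map = calloc(nwords, sizeof(*map)); /* before the lock */

        pthread_mutex_lock(&list_lock);
        report_remaining(name, map);   /* no allocation while the lock is held */
        pthread_mutex_unlock(&list_lock);

        free(map);                     /* after the lock is dropped */
}

int main(void)
{
        shutdown_cache("demo_cache", 128);
        return 0;
}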
Fixes: bbd7d57bfe ("slub: Potential stack overflow")
Signed-off-by: Christopher Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Yu Zhao <yuzhao@google.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.21.2002031721250.1668@www.lameter.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit aa456c7aeb
parent d7660ce591
 mm/slub.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3766,12 +3766,14 @@ error:
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-                              const char *text)
+                              const char *text, unsigned long *map)
 {
 #ifdef CONFIG_SLUB_DEBUG
         void *addr = page_address(page);
         void *p;
-        unsigned long *map;
+
+        if (!map)
+                return;
 
         slab_err(s, page, text, s->name);
         slab_lock(page);
@@ -3784,8 +3786,6 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                         print_tracking(s, p);
                 }
         }
-        put_map(map);
-
         slab_unlock(page);
 #endif
 }
@@ -3799,6 +3799,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
         LIST_HEAD(discard);
         struct page *page, *h;
+        unsigned long *map = NULL;
+
+#ifdef CONFIG_SLUB_DEBUG
+        map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
+#endif
 
         BUG_ON(irqs_disabled());
         spin_lock_irq(&n->list_lock);
@@ -3808,11 +3813,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                         list_add(&page->slab_list, &discard);
                 } else {
                         list_slab_objects(s, page,
-                          "Objects remaining in %s on __kmem_cache_shutdown()");
+                          "Objects remaining in %s on __kmem_cache_shutdown()",
+                          map);
                 }
         }
         spin_unlock_irq(&n->list_lock);
 
+#ifdef CONFIG_SLUB_DEBUG
+        bitmap_free(map);
+#endif
+
         list_for_each_entry_safe(page, h, &discard, slab_list)
                 discard_slab(s, page);
 }