mm, slab: suppress out of memory warning unless debug is enabled
When the slab or slub allocators cannot allocate additional slab pages, they emit diagnostic information to the kernel log such as the current number of slabs, number of objects, active objects, etc. This is always coupled with a page allocation failure warning since it is controlled by !__GFP_NOWARN.

Suppress this out of memory warning if the allocator is configured without debug support. The page allocation failure warning will indicate it is a failed slab allocation, the order, and the gfp mask, so this is only useful to diagnose allocator issues.

Since CONFIG_SLUB_DEBUG is already enabled by default for the slub allocator, there is no functional change with this patch. If debug is disabled, however, the warnings are now suppressed.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9a02d69993 (parent ecc42fbe95)
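For orientation, the suppression pattern this patch adds to both slab_out_of_memory() implementations is roughly the sketch below. The identifiers example_out_of_memory and example_oom_rs are illustrative, not kernel names; the real hunks follow in the diff. The report is compiled out entirely when debug support is off, honours __GFP_NOWARN itself, and carries its own ratelimit state instead of relying on callers and printk_ratelimit().

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

/* Illustrative sketch only; see the mm/slab.c and mm/slub.c hunks below
 * for the actual code this commit introduces. */
static noinline void example_out_of_memory(gfp_t gfpflags, int nid)
{
#ifdef CONFIG_SLUB_DEBUG        /* mm/slab.c gates on "#if DEBUG" instead */
        /* One static ratelimit state shared by every caller of the reporter. */
        static DEFINE_RATELIMIT_STATE(example_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        /* Callers no longer test __GFP_NOWARN; the reporter filters it here. */
        if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&example_oom_rs))
                return;

        pr_warn("example: unable to allocate memory on node %d (gfp=0x%x)\n",
                nid, gfpflags);
#endif
}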
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1621,10 +1621,16 @@ __initcall(cpucache_init);
 static noinline void
 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 {
+#if DEBUG
 	struct kmem_cache_node *n;
 	struct page *page;
 	unsigned long flags;
 	int node;
+	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
+		return;
 
 	printk(KERN_WARNING
 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
@@ -1662,6 +1668,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 			node, active_slabs, num_slabs, active_objs, num_objs,
 			free_objects);
 	}
+#endif
 }
 
 /*
@@ -1683,7 +1690,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page) {
-		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(cachep, flags, nodeid);
+		slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
 	}
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2119,11 +2119,19 @@ static inline int node_match(struct page *page, int node)
 	return 1;
 }
 
+#ifdef CONFIG_SLUB_DEBUG
 static int count_free(struct page *page)
 {
 	return page->objects - page->inuse;
 }
 
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->total_objects);
+}
+#endif /* CONFIG_SLUB_DEBUG */
+
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -2137,21 +2145,19 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
 
-static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	return atomic_long_read(&n->total_objects);
-#else
-	return 0;
-#endif
-}
-
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
+#ifdef CONFIG_SLUB_DEBUG
+	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
 	int node;
 
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
+		return;
+
 	pr_warn("SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nid, gfpflags);
 	pr_warn("  cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
@@ -2178,6 +2184,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
 			node, nr_slabs, nr_objs, nr_free);
 	}
+#endif
 }
 
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
@@ -2356,9 +2363,7 @@ new_slab:
 	freelist = new_slab_objects(s, gfpflags, node, &c);
 
 	if (unlikely(!freelist)) {
-		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(s, gfpflags, node);
-
+		slab_out_of_memory(s, gfpflags, node);
 		local_irq_restore(flags);
 		return NULL;
 	}
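The call sites simplify correspondingly. As a hedged illustration of the shape they take after this patch (example_getpages is an invented name standing in for kmem_getpages() above; the SLUB allocation slowpath changes the same way), the failure path calls the reporter unconditionally and no longer tests __GFP_NOWARN or printk_ratelimit() itself.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative call-site sketch, not the exact kernel code. */
static struct page *example_getpages(struct kmem_cache *cachep, gfp_t flags,
                                     int nodeid)
{
        struct page *page;

        page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK,
                                      cachep->gfporder);
        if (!page) {
                /* Warning filtering and ratelimiting happen inside the reporter. */
                slab_out_of_memory(cachep, flags, nodeid);
                return NULL;
        }
        return page;
}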