Mirror of https://github.com/torvalds/linux.git (synced 2024-12-27 13:22:23 +00:00)
mm: slub: SLUB_DEBUG=n: use the same alloc/free hooks as for SLUB_DEBUG=y
There are two versions of alloc/free hooks now - one for CONFIG_SLUB_DEBUG=y and another one for CONFIG_SLUB_DEBUG=n.

I see no reason why calls to other debugging subsystems (LOCKDEP, DEBUG_ATOMIC_SLEEP, KMEMCHECK and FAILSLAB) are hidden under SLUB_DEBUG. All these features should work regardless of the SLUB_DEBUG config, as all of them already have their own Kconfig options.

This also fixes failslab for the CONFIG_SLUB_DEBUG=n configuration. It simply has not worked before, because the should_failslab() call was in a hook hidden under "#ifdef CONFIG_SLUB_DEBUG #else".

Note: there is one concealed change in the allocation path for SLUB_DEBUG=n with all other debugging features disabled. The might_sleep_if() call can generate some code even if DEBUG_ATOMIC_SLEEP=n. For PREEMPT_VOLUNTARY=y, might_sleep() inserts a _cond_resched() call, but I think it should be ok.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
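For context, here is a condensed sketch (assembled from the hunks below, not a literal copy of the pre-patch file) of the layout that made failslab dead code on SLUB_DEBUG=n kernels: the real pre-alloc hook lived inside the #ifdef CONFIG_SLUB_DEBUG block, while the #else branch only had an empty stub.

/*
 * Condensed sketch of the pre-patch layout in mm/slub.c.  With
 * CONFIG_SLUB_DEBUG=n the stub in the #else branch is what gets
 * compiled, so should_failslab() is never reached even when
 * CONFIG_FAILSLAB=y, and the LOCKDEP/DEBUG_ATOMIC_SLEEP calls are
 * skipped as well.
 */
#ifdef CONFIG_SLUB_DEBUG
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(flags & __GFP_WAIT);

	return should_failslab(s->object_size, flags, s->flags);
}
#else
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{ return 0; }	/* fault injection and debug checks silently bypassed */
#endif

The patch moves the #endif above the hooks so a single, full-featured set of hooks is built unconditionally.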
This commit is contained in:
parent c07b8183cb
commit 02e72cc617

mm/slub.c | 99
@@ -939,60 +939,6 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 	}
 }

-/*
- * Hooks for other subsystems that check memory allocations. In a typical
- * production configuration these hooks all should produce no code at all.
- */
-static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
-{
-	kmemleak_alloc(ptr, size, 1, flags);
-}
-
-static inline void kfree_hook(const void *x)
-{
-	kmemleak_free(x);
-}
-
-static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
-{
-	flags &= gfp_allowed_mask;
-	lockdep_trace_alloc(flags);
-	might_sleep_if(flags & __GFP_WAIT);
-
-	return should_failslab(s->object_size, flags, s->flags);
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
-{
-	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
-}
-
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
-{
-	kmemleak_free_recursive(x, s->flags);
-
-	/*
-	 * Trouble is that we may no longer disable interrupts in the fast path
-	 * So in order to make the debug calls that expect irqs to be
-	 * disabled we need to disable interrupts temporarily.
-	 */
-#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
-	{
-		unsigned long flags;
-
-		local_irq_save(flags);
-		kmemcheck_slab_free(s, x, s->object_size);
-		debug_check_no_locks_freed(x, s->object_size);
-		local_irq_restore(flags);
-	}
-#endif
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(x, s->object_size);
-}
-
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
@@ -1277,6 +1223,12 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}

+#endif /* CONFIG_SLUB_DEBUG */
+
+/*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks all should produce no code at all.
+ */
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
@@ -1288,21 +1240,44 @@ static inline void kfree_hook(const void *x)
 }

 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
-{ return 0; }
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object)
 {
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
-		flags & gfp_allowed_mask);
+	flags &= gfp_allowed_mask;
+	lockdep_trace_alloc(flags);
+	might_sleep_if(flags & __GFP_WAIT);
+
+	return should_failslab(s->object_size, flags, s->flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
+{
+	flags &= gfp_allowed_mask;
+	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 }

 static inline void slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
-}

-#endif /* CONFIG_SLUB_DEBUG */
+	/*
+	 * Trouble is that we may no longer disable interrupts in the fast path
+	 * So in order to make the debug calls that expect irqs to be
+	 * disabled we need to disable interrupts temporarily.
+	 */
+#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		kmemcheck_slab_free(s, x, s->object_size);
+		debug_check_no_locks_freed(x, s->object_size);
+		local_irq_restore(flags);
+	}
+#endif
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(x, s->object_size);
+}

 /*
  * Slab allocation and freeing
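For readability, the consolidated pre-alloc hook is repeated below with each call annotated by the Kconfig option that controls it (annotations added here; they are not in the kernel source). With every listed option disabled, each line compiles to (almost) nothing; the one exception noted in the changelog is might_sleep_if(), which still emits _cond_resched() under PREEMPT_VOLUNTARY=y.

/*
 * slab_pre_alloc_hook() as it reads after this patch, annotated for
 * clarity.  The hook is now built regardless of CONFIG_SLUB_DEBUG.
 */
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);		/* CONFIG_LOCKDEP */
	might_sleep_if(flags & __GFP_WAIT);	/* CONFIG_DEBUG_ATOMIC_SLEEP */

	return should_failslab(s->object_size, flags, s->flags);	/* CONFIG_FAILSLAB */
}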