mm, slub: introduce static key for slub_debug()

One advantage of CONFIG_SLUB_DEBUG is that a generic distro kernel can be
built with the option enabled, yet the debugging stays inactive until it is
enabled on boot, without rebuilding the kernel.  With a static key, we can
further eliminate the overhead of checking whether a cache has a particular
debug flag enabled when we know that there are no such caches (slub_debug
was not enabled during boot).  The same mechanism is already used for e.g.
page_owner, debug_pagealloc or kmemcg functionality.
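
For readers unfamiliar with the jump-label machinery, the pattern looks
roughly like the sketch below.  It uses only the generic
<linux/jump_label.h> interface; the debugging_active() and
enable_debugging() wrappers are hypothetical names used for illustration,
not part of this patch.

#include <linux/jump_label.h>

/* The key defaults to false, so the test site starts out as a NOP. */
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);

/* Hypothetical helper: costs a patched-out jump, not a flag load, when off. */
static inline bool debugging_active(void)
{
        return static_branch_unlikely(&slub_debug_enabled);
}

/* Hypothetical helper: called once at boot to live-patch the branch in. */
static void __init enable_debugging(void)
{
        static_branch_enable(&slub_debug_enabled);
}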

This patch introduces the static key and makes kmem_cache_debug(), the
general check for per-cache debug flags, use it.  This benefits several
call sites, including __slab_free(), which is a slow path but still rather
frequent.  The next patches will add more uses.
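
As a rough illustration (this is not the exact __slab_free() body, and the
function name is made up), a call site guarded by kmem_cache_debug() now
pays only a patched-out branch when slub_debug was never enabled on boot:

/*
 * Illustrative sketch only: with the static key off, kmem_cache_debug()
 * falls through to "return 0" behind a patched-out branch, so the
 * per-cache flags word is not even loaded on this path.
 */
static void free_with_optional_checks(struct kmem_cache *s, void *object)
{
        if (kmem_cache_debug(s)) {
                /* expensive consistency checks run only when debugging is on */
        }

        /* fast free path continues unconditionally */
}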

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jann Horn <jannh@google.com>
Cc: Vijayanand Jitta <vjitta@codeaurora.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Link: http://lkml.kernel.org/r/20200610163135.17364-7-vbabka@suse.cz
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -114,13 +114,21 @@
  * the fast path and disables lockless freelists.
  */
 
+#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SLUB_DEBUG_ON
+DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
+#else
+DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
+#endif
+#endif
+
 static inline int kmem_cache_debug(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_DEBUG
-        return unlikely(s->flags & SLAB_DEBUG_FLAGS);
-#else
-        return 0;
+        if (static_branch_unlikely(&slub_debug_enabled))
+                return s->flags & SLAB_DEBUG_FLAGS;
 #endif
+        return 0;
 }
 
 void *fixup_red_left(struct kmem_cache *s, void *p)
@@ -1389,6 +1397,8 @@ static int __init setup_slub_debug(char *str)
                 slub_debug_string = saved_str;
         }
 out:
+        if (slub_debug != 0 || slub_debug_string)
+                static_branch_enable(&slub_debug_enabled);
         if ((static_branch_unlikely(&init_on_alloc) ||
              static_branch_unlikely(&init_on_free)) &&
             (slub_debug & SLAB_POISON))