mm, page_alloc: put should_fail_alloc_page() back behind CONFIG_FAIL_PAGE_ALLOC

This mostly reverts commit af3b854492 ("mm/page_alloc.c: allow error
injection").  The commit made should_fail_alloc_page() a noinline function
that's always called from the page allocation hotpath, even if it's empty
because CONFIG_FAIL_PAGE_ALLOC is not enabled, and there is no option to
disable it and prevent the associated function call overhead.
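(For illustration only, not part of the patch: a minimal standalone C sketch of the
arrangement being reverted.  The names mirror the kernel functions but the bodies are
placeholders; the point is that the hotpath pays for an out-of-line call even though
the callee trivially returns false when fault injection is compiled out.)

	/*
	 * Standalone sketch (GCC/Clang), not kernel code.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	/* What the callee degenerates to when CONFIG_FAIL_PAGE_ALLOC is off. */
	static bool __should_fail_alloc_page(unsigned int order)
	{
		return false;
	}

	/* Kept out of line unconditionally so error injection can always hook it,
	 * which is exactly the per-allocation call overhead the revert removes. */
	__attribute__((noinline)) static bool should_fail_alloc_page(unsigned int order)
	{
		return __should_fail_alloc_page(order);
	}

	int main(void)
	{
		/* stand-in for the page allocation hotpath */
		if (should_fail_alloc_page(0))
			puts("inject allocation failure");
		else
			puts("allocation proceeds");
		return 0;
	}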

As with the preceding patch "mm, slab: put should_failslab back behind
CONFIG_SHOULD_FAILSLAB" and for the same reasons, put the
should_fail_alloc_page() back behind the config option.  When enabled, the
ALLOW_ERROR_INJECTION and BTF_ID records are preserved so it's not a
complete revert.
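
(Again for illustration only: a simplified, compilable C sketch of the restored
pattern.  CONFIG_FAIL_PAGE_ALLOC stands in for the Kconfig symbol and the policy in
the enabled branch is made up; with the option off, the stub is a static inline that
the compiler folds away, so the hotpath carries no function call at all.)

	#include <stdbool.h>
	#include <stdio.h>

	#ifdef CONFIG_FAIL_PAGE_ALLOC
	/* Real out-of-line function; in the kernel it is additionally annotated with
	 * ALLOW_ERROR_INJECTION() and listed as a BTF_ID for bpf. */
	bool should_fail_alloc_page(unsigned int order)
	{
		return order > 3;	/* placeholder policy, not the kernel's */
	}
	#else
	/* Stub: calls compile down to a constant false, no call emitted. */
	static inline bool should_fail_alloc_page(unsigned int order)
	{
		(void)order;
		return false;
	}
	#endif

	int main(void)
	{
		if (should_fail_alloc_page(4))
			puts("fault injected");
		else
			puts("no fault");
		return 0;
	}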

Link: https://lkml.kernel.org/r/20240711-b4-fault-injection-reverts-v1-2-9e2651945d68@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Song Liu <song@kernel.org>
Cc: Stanislav Fomichev <sdf@fomichev.me>
Cc: Yonghong Song <yonghong.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 4 files changed, 7 insertions(+), 11 deletions(-)

include/linux/fault-inject.h

@@ -91,12 +91,10 @@ static inline void fault_config_init(struct fault_config *config,
 
 struct kmem_cache;
 
-bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
-
 #ifdef CONFIG_FAIL_PAGE_ALLOC
-bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
 #else
-static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	return false;
 }

kernel/bpf/verifier.c

@@ -21122,7 +21122,9 @@ BTF_SET_START(btf_non_sleepable_error_inject)
  * Assume non-sleepable from bpf safety point of view.
  */
 BTF_ID(func, __filemap_add_folio)
+#ifdef CONFIG_FAIL_PAGE_ALLOC
 BTF_ID(func, should_fail_alloc_page)
+#endif
 #ifdef CONFIG_FAILSLAB
 BTF_ID(func, should_failslab)
 #endif

mm/fail_page_alloc.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/fault-inject.h>
+#include <linux/error-injection.h>
 #include <linux/mm.h>
 
 static struct {
@@ -21,7 +22,7 @@ static int __init setup_fail_page_alloc(char *str)
 }
 __setup("fail_page_alloc=", setup_fail_page_alloc);
 
-bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	int flags = 0;
 
@@ -41,6 +42,7 @@ bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 
 	return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags);
 }
+ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

mm/page_alloc.c

@@ -3050,12 +3050,6 @@ out:
 	return page;
 }
 
-noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
-{
-	return __should_fail_alloc_page(gfp_mask, order);
-}
-ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
-
 static inline long __zone_watermark_unusable_free(struct zone *z,
 				unsigned int order, unsigned int alloc_flags)
 {