Modules changes for v6.10-rc1
Merge tag 'modules-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux

Pull modules updates from Luis Chamberlain:
 "Finally something fun. Mike Rapoport does some cleanup to allow us to
  take out module_alloc() out of modules into a new paint shedded
  execmem_alloc() and execmem_free() so to make emphasis these helpers
  are actually used outside of modules.

  It starts with a non-functional changes API rename / placeholders to
  then allow architectures to define their requirements into a new
  shiny struct execmem_info with ranges, and requirements for those
  ranges.

  Archs now can initialize this execmem_info as the last part of
  mm_core_init() if they have to diverge from the norm. Each range is a
  known type clearly articulated and spelled out in enum execmem_type.

  Although a lot of this is major cleanup and prep work for future
  enhancements an immediate clear gain is we get to enable KPROBES
  without MODULES now. That is ultimately what motivated to pick this
  work up again, now with smaller goal as concrete stepping stone.

  This has been sitting on linux-next for a little less than a month, a
  few issues were found already and fixed, in particular an odd mips
  boot issue. Arch folks reviewed the code too. This is ready for wider
  exposure and testing."

* tag 'modules-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux:
  bpf: remove CONFIG_BPF_JIT dependency on CONFIG_MODULES of
  kprobes: remove dependency on CONFIG_MODULES
  powerpc: use CONFIG_EXECMEM instead of CONFIG_MODULES where appropriate
  x86/ftrace: enable dynamic ftrace without CONFIG_MODULES
  arch: make execmem setup available regardless of CONFIG_MODULES
  powerpc: extend execmem_params for kprobes allocations
  arm64: extend execmem_info for generated code allocations
  riscv: extend execmem_params for generated code allocations
  mm/execmem, arch: convert remaining overrides of module_alloc to execmem
  mm/execmem, arch: convert simple overrides of module_alloc to execmem
  mm: introduce execmem_alloc() and execmem_free()
  module: make module_memory_{alloc,free} more self-contained
  sparc: simplify module_alloc()
  nios2: define virtual address space for modules
  mips: module: rename MODULE_START to MODULES_VADDR
  arm64: module: remove unneeded call to kasan_alloc_module_shadow()
  kallsyms: replace deprecated strncpy with strscpy
  module: allow UNUSED_KSYMS_WHITELIST to be relative against objtree.
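Before the diff itself, a minimal sketch of the shape this series introduces may help orientation. It is assembled from pieces that appear verbatim in the hunks below; the MODULES_VADDR/MODULES_END range values stand in for whatever layout a given architecture defines, so treat this as an illustration rather than any one arch's implementation:

```c
#include <linux/execmem.h>

/* Arch side: describe the ranges once, typically in mm/init.c. */
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start     = MODULES_VADDR,	/* arch-specific */
				.end       = MODULES_END,	/* arch-specific */
				.pgprot    = PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}

/* Consumer side: subsystems ask for memory by type instead of calling
 * module_alloc(); the kprobes insn page is the canonical case below. */
void *alloc_insn_page(void)
{
	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
}
```

Ranges an architecture does not fill in fall back to the EXECMEM_DEFAULT parameters, which is what makes KPROBES without MODULES possible: kprobes only needs an EXECMEM_KPROBES range, not the module loader.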
This commit is contained in:

commit a49468240e

arch/Kconfig
@@ -60,9 +60,9 @@ config GENERIC_ENTRY
 config KPROBES
 	bool "Kprobes"
-	depends on MODULES
 	depends on HAVE_KPROBES
 	select KALLSYMS
+	select EXECMEM
 	select NEED_TASKS_RCU
 	help
 	  Kprobes allows you to trap at almost any kernel address and
@@ -977,6 +977,14 @@ config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
 	  For architectures like powerpc/32 which have constraints on module
 	  allocation and need to allocate module data outside of module area.
 
+config ARCH_WANTS_EXECMEM_LATE
+	bool
+	help
+	  For architectures that do not allocate executable memory early on
+	  boot, but rather require its initialization late when there is
+	  enough entropy for module space randomization, for instance
+	  arm64.
+
 config HAVE_IRQ_EXIT_ON_IRQ_STACK
 	bool
 	help
arch/arm/kernel/module.c
@@ -12,48 +12,14 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/elf.h>
-#include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/gfp.h>
 
 #include <asm/sections.h>
 #include <asm/smp_plat.h>
 #include <asm/unwind.h>
 #include <asm/opcodes.h>
 
-#ifdef CONFIG_XIP_KERNEL
-/*
- * The XIP kernel text is mapped in the module area for modules and
- * some other stuff to work without any indirect relocations.
- * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
- * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
- */
-#undef MODULES_VADDR
-#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
-#endif
-
-#ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
-{
-	gfp_t gfp_mask = GFP_KERNEL;
-	void *p;
-
-	/* Silence the initial allocation */
-	if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
-		gfp_mask |= __GFP_NOWARN;
-
-	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
-				__builtin_return_address(0));
-	if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
-		return p;
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
-				__builtin_return_address(0));
-}
-#endif
-
 bool module_init_section(const char *name)
 {
 	return strstarts(name, ".init") ||
arch/arm/mm/init.c
@@ -22,6 +22,7 @@
 #include <linux/sizes.h>
 #include <linux/stop_machine.h>
 #include <linux/swiotlb.h>
+#include <linux/execmem.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -486,3 +487,47 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
+
+#ifdef CONFIG_EXECMEM
+
+#ifdef CONFIG_XIP_KERNEL
+/*
+ * The XIP kernel text is mapped in the module area for modules and
+ * some other stuff to work without any indirect relocations.
+ * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
+ * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
+ */
+#undef MODULES_VADDR
+#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
+#endif
+
+#ifdef CONFIG_MMU
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	unsigned long fallback_start = 0, fallback_end = 0;
+
+	if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) {
+		fallback_start = VMALLOC_START;
+		fallback_end = VMALLOC_END;
+	}
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = MODULES_VADDR,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL_EXEC,
+				.alignment = 1,
+				.fallback_start = fallback_start,
+				.fallback_end = fallback_end,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_MMU */
+
+#endif /* CONFIG_EXECMEM */
arch/arm64/Kconfig
@@ -105,6 +105,7 @@ config ARM64
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
 	select ARCH_WANT_LD_ORPHAN_WARN
+	select ARCH_WANTS_EXECMEM_LATE if EXECMEM
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
 	select ARCH_HAS_UBSAN
arch/arm64/kernel/module.c
@@ -12,144 +12,18 @@
 #include <linux/bitops.h>
 #include <linux/elf.h>
 #include <linux/ftrace.h>
-#include <linux/gfp.h>
-#include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
-#include <linux/random.h>
 #include <linux/scs.h>
 #include <linux/vmalloc.h>
 
 #include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/scs.h>
 #include <asm/sections.h>
 
-static u64 module_direct_base __ro_after_init = 0;
-static u64 module_plt_base __ro_after_init = 0;
-
-/*
- * Choose a random page-aligned base address for a window of 'size' bytes which
- * entirely contains the interval [start, end - 1].
- */
-static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
-{
-	u64 max_pgoff, pgoff;
-
-	if ((end - start) >= size)
-		return 0;
-
-	max_pgoff = (size - (end - start)) / PAGE_SIZE;
-	pgoff = get_random_u32_inclusive(0, max_pgoff);
-
-	return start - pgoff * PAGE_SIZE;
-}
-
-/*
- * Modules may directly reference data and text anywhere within the kernel
- * image and other modules. References using PREL32 relocations have a +/-2G
- * range, and so we need to ensure that the entire kernel image and all modules
- * fall within a 2G window such that these are always within range.
- *
- * Modules may directly branch to functions and code within the kernel text,
- * and to functions and code within other modules. These branches will use
- * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
- * that the entire kernel text and all module text falls within a 128M window
- * such that these are always within range. With PLTs, we can expand this to a
- * 2G window.
- *
- * We chose the 128M region to surround the entire kernel image (rather than
- * just the text) as using the same bounds for the 128M and 2G regions ensures
- * by construction that we never select a 128M region that is not a subset of
- * the 2G region. For very large and unusual kernel configurations this means
- * we may fall back to PLTs where they could have been avoided, but this keeps
- * the logic significantly simpler.
- */
-static int __init module_init_limits(void)
-{
-	u64 kernel_end = (u64)_end;
-	u64 kernel_start = (u64)_text;
-	u64 kernel_size = kernel_end - kernel_start;
-
-	/*
-	 * The default modules region is placed immediately below the kernel
-	 * image, and is large enough to use the full 2G relocation range.
-	 */
-	BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
-	BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
-
-	if (!kaslr_enabled()) {
-		if (kernel_size < SZ_128M)
-			module_direct_base = kernel_end - SZ_128M;
-		if (kernel_size < SZ_2G)
-			module_plt_base = kernel_end - SZ_2G;
-	} else {
-		u64 min = kernel_start;
-		u64 max = kernel_end;
-
-		if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
-			pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
-		} else {
-			module_direct_base = random_bounding_box(SZ_128M, min, max);
-			if (module_direct_base) {
-				min = module_direct_base;
-				max = module_direct_base + SZ_128M;
-			}
-		}
-
-		module_plt_base = random_bounding_box(SZ_2G, min, max);
-	}
-
-	pr_info("%llu pages in range for non-PLT usage",
-		module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
-	pr_info("%llu pages in range for PLT usage",
-		module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
-
-	return 0;
-}
-subsys_initcall(module_init_limits);
-
-void *module_alloc(unsigned long size)
-{
-	void *p = NULL;
-
-	/*
-	 * Where possible, prefer to allocate within direct branch range of the
-	 * kernel such that no PLTs are necessary.
-	 */
-	if (module_direct_base) {
-		p = __vmalloc_node_range(size, MODULE_ALIGN,
-					 module_direct_base,
-					 module_direct_base + SZ_128M,
-					 GFP_KERNEL | __GFP_NOWARN,
-					 PAGE_KERNEL, 0, NUMA_NO_NODE,
-					 __builtin_return_address(0));
-	}
-
-	if (!p && module_plt_base) {
-		p = __vmalloc_node_range(size, MODULE_ALIGN,
-					 module_plt_base,
-					 module_plt_base + SZ_2G,
-					 GFP_KERNEL | __GFP_NOWARN,
-					 PAGE_KERNEL, 0, NUMA_NO_NODE,
-					 __builtin_return_address(0));
-	}
-
-	if (!p) {
-		pr_warn_ratelimited("%s: unable to allocate memory\n",
-				    __func__);
-	}
-
-	if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
-		vfree(p);
-		return NULL;
-	}
-
-	/* Memory is intended to be executable, reset the pointer tag. */
-	return kasan_reset_tag(p);
-}
-
 enum aarch64_reloc_op {
 	RELOC_OP_NONE,
 	RELOC_OP_ABS,
arch/arm64/kernel/probes/kprobes.c
@@ -129,13 +129,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return 0;
 }
 
-void *alloc_insn_page(void)
-{
-	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
-			GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
-			NUMA_NO_NODE, __builtin_return_address(0));
-}
-
 /* arm kprobe: install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
arch/arm64/mm/init.c
@@ -32,6 +32,7 @@
 #include <linux/hugetlb.h>
 #include <linux/acpi_iort.h>
 #include <linux/kmemleak.h>
+#include <linux/execmem.h>
 
 #include <asm/boot.h>
 #include <asm/fixmap.h>
@@ -432,3 +433,142 @@ void dump_mem_limit(void)
 		pr_emerg("Memory Limit: none\n");
 	}
 }
+
+#ifdef CONFIG_EXECMEM
+static u64 module_direct_base __ro_after_init = 0;
+static u64 module_plt_base __ro_after_init = 0;
+
+/*
+ * Choose a random page-aligned base address for a window of 'size' bytes which
+ * entirely contains the interval [start, end - 1].
+ */
+static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
+{
+	u64 max_pgoff, pgoff;
+
+	if ((end - start) >= size)
+		return 0;
+
+	max_pgoff = (size - (end - start)) / PAGE_SIZE;
+	pgoff = get_random_u32_inclusive(0, max_pgoff);
+
+	return start - pgoff * PAGE_SIZE;
+}
+
+/*
+ * Modules may directly reference data and text anywhere within the kernel
+ * image and other modules. References using PREL32 relocations have a +/-2G
+ * range, and so we need to ensure that the entire kernel image and all modules
+ * fall within a 2G window such that these are always within range.
+ *
+ * Modules may directly branch to functions and code within the kernel text,
+ * and to functions and code within other modules. These branches will use
+ * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
+ * that the entire kernel text and all module text falls within a 128M window
+ * such that these are always within range. With PLTs, we can expand this to a
+ * 2G window.
+ *
+ * We chose the 128M region to surround the entire kernel image (rather than
+ * just the text) as using the same bounds for the 128M and 2G regions ensures
+ * by construction that we never select a 128M region that is not a subset of
+ * the 2G region. For very large and unusual kernel configurations this means
+ * we may fall back to PLTs where they could have been avoided, but this keeps
+ * the logic significantly simpler.
+ */
+static int __init module_init_limits(void)
+{
+	u64 kernel_end = (u64)_end;
+	u64 kernel_start = (u64)_text;
+	u64 kernel_size = kernel_end - kernel_start;
+
+	/*
+	 * The default modules region is placed immediately below the kernel
+	 * image, and is large enough to use the full 2G relocation range.
+	 */
+	BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
+	BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
+
+	if (!kaslr_enabled()) {
+		if (kernel_size < SZ_128M)
+			module_direct_base = kernel_end - SZ_128M;
+		if (kernel_size < SZ_2G)
+			module_plt_base = kernel_end - SZ_2G;
+	} else {
+		u64 min = kernel_start;
+		u64 max = kernel_end;
+
+		if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+			pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
+		} else {
+			module_direct_base = random_bounding_box(SZ_128M, min, max);
+			if (module_direct_base) {
+				min = module_direct_base;
+				max = module_direct_base + SZ_128M;
+			}
+		}
+
+		module_plt_base = random_bounding_box(SZ_2G, min, max);
+	}
+
+	pr_info("%llu pages in range for non-PLT usage",
+		module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
+	pr_info("%llu pages in range for PLT usage",
+		module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
+
+	return 0;
+}
+
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	unsigned long fallback_start = 0, fallback_end = 0;
+	unsigned long start = 0, end = 0;
+
+	module_init_limits();
+
+	/*
+	 * Where possible, prefer to allocate within direct branch range of the
+	 * kernel such that no PLTs are necessary.
+	 */
+	if (module_direct_base) {
+		start = module_direct_base;
+		end = module_direct_base + SZ_128M;
+
+		if (module_plt_base) {
+			fallback_start = module_plt_base;
+			fallback_end = module_plt_base + SZ_2G;
+		}
+	} else if (module_plt_base) {
+		start = module_plt_base;
+		end = module_plt_base + SZ_2G;
+	}
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = start,
+				.end = end,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+				.fallback_start = fallback_start,
+				.fallback_end = fallback_end,
+			},
+			[EXECMEM_KPROBES] = {
+				.start = VMALLOC_START,
+				.end = VMALLOC_END,
+				.pgprot = PAGE_KERNEL_ROX,
+				.alignment = 1,
+			},
+			[EXECMEM_BPF] = {
+				.start = VMALLOC_START,
+				.end = VMALLOC_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
arch/arm64/net/bpf_jit_comp.c
@@ -1897,17 +1897,6 @@ u64 bpf_jit_alloc_exec_limit(void)
 	return VMALLOC_END - VMALLOC_START;
 }
 
-void *bpf_jit_alloc_exec(unsigned long size)
-{
-	/* Memory is intended to be executable, reset the pointer tag. */
-	return kasan_reset_tag(vmalloc(size));
-}
-
-void bpf_jit_free_exec(void *addr)
-{
-	return vfree(addr);
-}
-
 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
 bool bpf_jit_supports_subprog_tailcalls(void)
 {
arch/loongarch/kernel/module.c
@@ -490,12 +490,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 	return 0;
 }
 
-void *module_alloc(unsigned long size)
-{
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-			GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
-}
-
 static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
 				   const Elf_Shdr *sechdrs, struct module *mod)
 {
arch/loongarch/mm/init.c
@@ -24,6 +24,7 @@
 #include <linux/gfp.h>
 #include <linux/hugetlb.h>
 #include <linux/mmzone.h>
+#include <linux/execmem.h>
 
 #include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
@@ -248,3 +249,23 @@ EXPORT_SYMBOL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
 EXPORT_SYMBOL(invalid_pte_table);
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = MODULES_VADDR,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
arch/mips/include/asm/pgtable-64.h
@@ -147,8 +147,8 @@
 #if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
 	VMALLOC_START != CKSSEG
 /* Load modules into 32bit-compatible segment. */
-#define MODULE_START	CKSSEG
-#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
+#define MODULES_VADDR	CKSSEG
+#define MODULES_END	(FIXADDR_START-2*PAGE_SIZE)
 #endif
 
 #define pte_ERROR(e) \
arch/mips/kernel/module.c
@@ -13,7 +13,6 @@
 #include <linux/elf.h>
 #include <linux/mm.h>
 #include <linux/numa.h>
-#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/string.h>
@@ -31,15 +30,6 @@ struct mips_hi16 {
 static LIST_HEAD(dbe_list);
 static DEFINE_SPINLOCK(dbe_lock);
 
-#ifdef MODULE_START
-void *module_alloc(unsigned long size)
-{
-	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
-				GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
-				__builtin_return_address(0));
-}
-#endif
-
 static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v)
 {
 	*location = base + v;
arch/mips/mm/fault.c
@@ -83,8 +83,8 @@ static void __do_page_fault(struct pt_regs *regs, unsigned long write,
 
 	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
 		goto VMALLOC_FAULT_TARGET;
-#ifdef MODULE_START
-	if (unlikely(address >= MODULE_START && address < MODULE_END))
+#ifdef MODULES_VADDR
+	if (unlikely(address >= MODULES_VADDR && address < MODULES_END))
 		goto VMALLOC_FAULT_TARGET;
 #endif
 
arch/mips/mm/init.c
@@ -31,6 +31,7 @@
 #include <linux/gfp.h>
 #include <linux/kcore.h>
 #include <linux/initrd.h>
+#include <linux/execmem.h>
 
 #include <asm/bootinfo.h>
 #include <asm/cachectl.h>
@@ -576,3 +577,25 @@ EXPORT_SYMBOL_GPL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
 EXPORT_SYMBOL(invalid_pte_table);
+
+#ifdef CONFIG_EXECMEM
+#ifdef MODULES_VADDR
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = MODULES_VADDR,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif
+#endif /* CONFIG_EXECMEM */
arch/nios2/include/asm/pgtable.h
@@ -25,7 +25,10 @@
 #include <asm-generic/pgtable-nopmd.h>
 
 #define VMALLOC_START		CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
-#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
+#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M - 1)
+
+#define MODULES_VADDR		(CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M)
+#define MODULES_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
 
 struct mm_struct;
 
arch/nios2/kernel/module.c
@@ -13,7 +13,6 @@
 #include <linux/moduleloader.h>
 #include <linux/elf.h>
 #include <linux/mm.h>
-#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/string.h>
@@ -21,25 +20,6 @@
 
 #include <asm/cacheflush.h>
 
-/*
- * Modules should NOT be allocated with kmalloc for (obvious) reasons.
- * But we do it for now to avoid relocation issues. CALL26/PCREL26 cannot reach
- * from 0x80000000 (vmalloc area) to 0xc00000000 (kernel) (kmalloc returns
- * addresses in 0xc0000000)
- */
-void *module_alloc(unsigned long size)
-{
-	if (size == 0)
-		return NULL;
-	return kmalloc(size, GFP_KERNEL);
-}
-
-/* Free memory returned from module_alloc */
-void module_memfree(void *module_region)
-{
-	kfree(module_region);
-}
-
 int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 			unsigned int symindex, unsigned int relsec,
 			struct module *mod)
arch/nios2/mm/init.c
@@ -26,6 +26,7 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/binfmts.h>
+#include <linux/execmem.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -143,3 +144,23 @@ static const pgprot_t protection_map[16] = {
 	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1)
 };
 DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = MODULES_VADDR,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL_EXEC,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
arch/parisc/kernel/module.c
@@ -41,7 +41,6 @@
 
 #include <linux/moduleloader.h>
 #include <linux/elf.h>
-#include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/ftrace.h>
 #include <linux/string.h>
@@ -173,17 +172,6 @@ static inline int reassemble_22(int as22)
 		((as22 & 0x0003ff) << 3));
 }
 
-void *module_alloc(unsigned long size)
-{
-	/* using RWX means less protection for modules, but it's
-	 * easier than trying to map the text, data, init_text and
-	 * init_data correctly */
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-				    GFP_KERNEL,
-				    PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
-				    __builtin_return_address(0));
-}
-
 #ifndef CONFIG_64BIT
 static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
 {
arch/parisc/mm/init.c
@@ -24,6 +24,7 @@
 #include <linux/nodemask.h>	/* for node_online_map */
 #include <linux/pagemap.h>	/* for release_pages */
 #include <linux/compat.h>
+#include <linux/execmem.h>
 
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -481,7 +482,7 @@ void free_initmem(void)
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);
 
 	free_initmem_default(POISON_FREE_INITMEM);
 
 	/* set up a new led state on systems shipped LED State panel */
@@ -992,3 +993,23 @@ static const pgprot_t protection_map[16] = {
 	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
 };
 DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = VMALLOC_START,
+				.end = VMALLOC_END,
+				.pgprot = PAGE_KERNEL_RWX,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
arch/powerpc/Kconfig
@@ -286,7 +286,7 @@ config PPC
 	select IOMMU_HELPER			if PPC64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
-	select KASAN_VMALLOC			if KASAN && MODULES
+	select KASAN_VMALLOC			if KASAN && EXECMEM
 	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_PAGE_SIZE
 	select MMU_GATHER_RCU_TABLE_FREE
arch/powerpc/include/asm/kasan.h
@@ -19,7 +19,7 @@
 
 #define KASAN_SHADOW_SCALE_SHIFT	3
 
-#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
+#if defined(CONFIG_EXECMEM) && defined(CONFIG_PPC32)
 #define KASAN_KERN_START	ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
 #else
 #define KASAN_KERN_START	PAGE_OFFSET
arch/powerpc/kernel/head_8xx.S
@@ -199,12 +199,12 @@ instruction_counter:
 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 	INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
 	mtspr	SPRN_MD_EPN, r10
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	mfcr	r11
 	compare_to_kernel_boundary r10, r10
 #endif
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	blt+	3f
 	rlwinm	r10, r10, 0, 20, 31
 	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
arch/powerpc/kernel/head_book3s_32.S
@@ -419,14 +419,14 @@ InstructionTLBMiss:
  */
 	/* Get PTE (linux-style) and check access */
 	mfspr	r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 #endif
 	mfspr	r2, SPRN_SDR1
 	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 	rlwinm	r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	li	r0, 3
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
@@ -442,7 +442,7 @@ InstructionTLBMiss:
 	andc.	r1,r1,r2		/* check access & ~permission */
 	bne-	InstructionAddressInvalid /* return if access not permitted */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	rlwimi	r2, r0, 0, 31, 31	/* userspace ? -> PP lsb */
 #endif
 	ori	r1, r1, 0xe06		/* clear out reserved bits */
arch/powerpc/kernel/kprobes.c
@@ -19,8 +19,8 @@
 #include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
-#include <linux/moduleloader.h>
 #include <linux/set_memory.h>
+#include <linux/execmem.h>
 #include <asm/code-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/sstep.h>
@@ -126,26 +126,6 @@ kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offse
 	return (kprobe_opcode_t *)(addr + offset);
 }
 
-void *alloc_insn_page(void)
-{
-	void *page;
-
-	page = module_alloc(PAGE_SIZE);
-	if (!page)
-		return NULL;
-
-	if (strict_module_rwx_enabled()) {
-		int err = set_memory_rox((unsigned long)page, 1);
-
-		if (err)
-			goto error;
-	}
-	return page;
-error:
-	module_memfree(page);
-	return NULL;
-}
-
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	int ret = 0;
arch/powerpc/kernel/module.c
@@ -7,7 +7,6 @@
 #include <linux/elf.h>
 #include <linux/moduleloader.h>
 #include <linux/err.h>
-#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/bug.h>
 #include <asm/module.h>
@@ -88,40 +87,3 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 	return 0;
 }
-
-static __always_inline void *
-__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
-{
-	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
-	gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
-
-	/*
-	 * Don't do huge page allocations for modules yet until more testing
-	 * is done. STRICT_MODULE_RWX may require extra work to support this
-	 * too.
-	 */
-	return __vmalloc_node_range(size, 1, start, end, gfp, prot,
-				    VM_FLUSH_RESET_PERMS,
-				    NUMA_NO_NODE, __builtin_return_address(0));
-}
-
-void *module_alloc(unsigned long size)
-{
-#ifdef MODULES_VADDR
-	unsigned long limit = (unsigned long)_etext - SZ_32M;
-	void *ptr = NULL;
-
-	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
-
-	/* First try within 32M limit from _etext to avoid branch trampolines */
-	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
-		ptr = __module_alloc(size, limit, MODULES_END, true);
-
-	if (!ptr)
-		ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
-
-	return ptr;
-#else
-	return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
-#endif
-}
arch/powerpc/lib/code-patching.c
@@ -225,7 +225,7 @@ void __init poking_init(void)
 
 static unsigned long get_patch_pfn(void *addr)
 {
-	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
+	if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr))
 		return vmalloc_to_pfn(addr);
 	else
 		return __pa_symbol(addr) >> PAGE_SHIFT;
arch/powerpc/mm/book3s32/mmu.c
@@ -184,7 +184,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 
 static bool is_module_segment(unsigned long addr)
 {
-	if (!IS_ENABLED(CONFIG_MODULES))
+	if (!IS_ENABLED(CONFIG_EXECMEM))
 		return false;
 	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
 		return false;
arch/powerpc/mm/mem.c
@@ -16,6 +16,7 @@
 #include <linux/highmem.h>
 #include <linux/suspend.h>
 #include <linux/dma-direct.h>
+#include <linux/execmem.h>
 
 #include <asm/swiotlb.h>
 #include <asm/machdep.h>
@@ -406,3 +407,66 @@ int devmem_is_allowed(unsigned long pfn)
  * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
  */
 EXPORT_SYMBOL_GPL(walk_system_ram_range);
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+	unsigned long fallback_start = 0, fallback_end = 0;
+	unsigned long start, end;
+
+	/*
+	 * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
+	 * allow allocating data in the entire vmalloc space
+	 */
+#ifdef MODULES_VADDR
+	unsigned long limit = (unsigned long)_etext - SZ_32M;
+
+	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+	/* First try within 32M limit from _etext to avoid branch trampolines */
+	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
+		start = limit;
+		fallback_start = MODULES_VADDR;
+		fallback_end = MODULES_END;
+	} else {
+		start = MODULES_VADDR;
+	}
+
+	end = MODULES_END;
+#else
+	start = VMALLOC_START;
+	end = VMALLOC_END;
+#endif
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = start,
+				.end = end,
+				.pgprot = prot,
+				.alignment = 1,
+				.fallback_start = fallback_start,
+				.fallback_end = fallback_end,
+			},
+			[EXECMEM_KPROBES] = {
+				.start = VMALLOC_START,
+				.end = VMALLOC_END,
+				.pgprot = kprobes_prot,
+				.alignment = 1,
+			},
+			[EXECMEM_MODULE_DATA] = {
+				.start = VMALLOC_START,
+				.end = VMALLOC_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
arch/riscv/include/asm/pgtable.h
@@ -55,6 +55,9 @@
 #define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
 #define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
 #define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
+#else
+#define MODULES_VADDR		VMALLOC_START
+#define MODULES_END		VMALLOC_END
 #endif
 
 /*
arch/riscv/kernel/module.c
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/log2.h>
 #include <linux/moduleloader.h>
-#include <linux/vmalloc.h>
 #include <linux/sizes.h>
 #include <linux/pgtable.h>
 #include <asm/alternative.h>
@@ -905,17 +904,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 	return 0;
 }
 
-#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
-void *module_alloc(unsigned long size)
-{
-	return __vmalloc_node_range(size, 1, MODULES_VADDR,
-				    MODULES_END, GFP_KERNEL,
-				    PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
-				    NUMA_NO_NODE,
-				    __builtin_return_address(0));
-}
-#endif
-
 int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
arch/riscv/kernel/probes/kprobes.c
@@ -104,16 +104,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return 0;
 }
 
-#ifdef CONFIG_MMU
-void *alloc_insn_page(void)
-{
-	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
-				    GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
-				    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
-				    __builtin_return_address(0));
-}
-#endif
-
 /* install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
arch/riscv/mm/init.c
@@ -24,6 +24,7 @@
 #include <linux/elf.h>
 #endif
 #include <linux/kfence.h>
+#include <linux/execmem.h>
 
 #include <asm/fixmap.h>
 #include <asm/io.h>
@@ -1481,3 +1482,37 @@ void __init pgtable_cache_init(void)
 		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
 }
 #endif
+
+#ifdef CONFIG_EXECMEM
+#ifdef CONFIG_MMU
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = MODULES_VADDR,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+			},
+			[EXECMEM_KPROBES] = {
+				.start = VMALLOC_START,
+				.end = VMALLOC_END,
+				.pgprot = PAGE_KERNEL_READ_EXEC,
+				.alignment = 1,
+			},
+			[EXECMEM_BPF] = {
+				.start = BPF_JIT_REGION_START,
+				.end = BPF_JIT_REGION_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = PAGE_SIZE,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_MMU */
+#endif /* CONFIG_EXECMEM */
arch/riscv/net/bpf_jit_core.c
@@ -221,19 +221,6 @@ u64 bpf_jit_alloc_exec_limit(void)
 	return BPF_JIT_REGION_SIZE;
 }
 
-void *bpf_jit_alloc_exec(unsigned long size)
-{
-	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
-				    BPF_JIT_REGION_END, GFP_KERNEL,
-				    PAGE_KERNEL, 0, NUMA_NO_NODE,
-				    __builtin_return_address(0));
-}
-
-void bpf_jit_free_exec(void *addr)
-{
-	return vfree(addr);
-}
-
 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
 {
 	int ret;
arch/s390/kernel/ftrace.c
@@ -7,13 +7,13 @@
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
-#include <linux/moduleloader.h>
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/kprobes.h>
+#include <linux/execmem.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 #include <asm/text-patching.h>
@@ -220,7 +220,7 @@ static int __init ftrace_plt_init(void)
 {
 	const char *start, *end;
 
-	ftrace_plt = module_alloc(PAGE_SIZE);
+	ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE);
 	if (!ftrace_plt)
 		panic("cannot allocate ftrace plt\n");
 
arch/s390/kernel/kprobes.c
@@ -9,7 +9,6 @@
 
 #define pr_fmt(fmt) "kprobes: " fmt
 
-#include <linux/moduleloader.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
@@ -21,6 +20,7 @@
 #include <linux/slab.h>
 #include <linux/hardirq.h>
 #include <linux/ftrace.h>
+#include <linux/execmem.h>
 #include <asm/set_memory.h>
 #include <asm/sections.h>
 #include <asm/dis.h>
@@ -38,7 +38,7 @@ void *alloc_insn_page(void)
 {
 	void *page;
 
-	page = module_alloc(PAGE_SIZE);
+	page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
 	if (!page)
 		return NULL;
 	set_memory_rox((unsigned long)page, 1);
arch/s390/kernel/module.c
@@ -21,6 +21,7 @@
 #include <linux/moduleloader.h>
 #include <linux/bug.h>
 #include <linux/memory.h>
+#include <linux/execmem.h>
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
 #include <asm/facility.h>
@@ -36,47 +37,10 @@
 
 #define PLT_ENTRY_SIZE 22
 
-static unsigned long get_module_load_offset(void)
-{
-	static DEFINE_MUTEX(module_kaslr_mutex);
-	static unsigned long module_load_offset;
-
-	if (!kaslr_enabled())
-		return 0;
-	/*
-	 * Calculate the module_load_offset the first time this code
-	 * is called. Once calculated it stays the same until reboot.
-	 */
-	mutex_lock(&module_kaslr_mutex);
-	if (!module_load_offset)
-		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
-	mutex_unlock(&module_kaslr_mutex);
-	return module_load_offset;
-}
-
-void *module_alloc(unsigned long size)
-{
-	gfp_t gfp_mask = GFP_KERNEL;
-	void *p;
-
-	if (PAGE_ALIGN(size) > MODULES_LEN)
-		return NULL;
-	p = __vmalloc_node_range(size, MODULE_ALIGN,
-				 MODULES_VADDR + get_module_load_offset(),
-				 MODULES_END, gfp_mask, PAGE_KERNEL,
-				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
-				 NUMA_NO_NODE, __builtin_return_address(0));
-	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
-		vfree(p);
-		return NULL;
-	}
-	return p;
-}
-
 #ifdef CONFIG_FUNCTION_TRACER
 void module_arch_cleanup(struct module *mod)
 {
-	module_memfree(mod->arch.trampolines_start);
+	execmem_free(mod->arch.trampolines_start);
 }
 #endif
 
@@ -510,7 +474,7 @@ static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
 
 	size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
 	numpages = DIV_ROUND_UP(size, PAGE_SIZE);
-	start = module_alloc(numpages * PAGE_SIZE);
+	start = execmem_alloc(EXECMEM_FTRACE, numpages * PAGE_SIZE);
 	if (!start)
 		return -ENOMEM;
 	set_memory_rox((unsigned long)start, numpages);
arch/s390/mm/init.c
@@ -49,6 +49,7 @@
 #include <asm/uv.h>
 #include <linux/virtio_anchor.h>
 #include <linux/virtio_config.h>
+#include <linux/execmem.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
 pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
@@ -302,3 +303,32 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
 	vmem_remove_mapping(start, size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	unsigned long module_load_offset = 0;
+	unsigned long start;
+
+	if (kaslr_enabled())
+		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+
+	start = MODULES_VADDR + module_load_offset;
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.flags = EXECMEM_KASAN_SHADOW,
+				.start = start,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = MODULE_ALIGN,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
arch/sparc/include/asm/pgtable_32.h
@@ -432,6 +432,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 
 #define VMALLOC_START           _AC(0xfe600000,UL)
 #define VMALLOC_END             _AC(0xffc00000,UL)
+#define MODULES_VADDR           VMALLOC_START
+#define MODULES_END             VMALLOC_END
 
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
arch/sparc/kernel/module.c
@@ -21,36 +21,6 @@
 
 #include "entry.h"
 
-#ifdef CONFIG_SPARC64
-
-#include <linux/jump_label.h>
-
-static void *module_map(unsigned long size)
-{
-	if (PAGE_ALIGN(size) > MODULES_LEN)
-		return NULL;
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
-				__builtin_return_address(0));
-}
-#else
-static void *module_map(unsigned long size)
-{
-	return vmalloc(size);
-}
-#endif /* CONFIG_SPARC64 */
-
-void *module_alloc(unsigned long size)
-{
-	void *ret;
-
-	ret = module_map(size);
-	if (ret)
-		memset(ret, 0, size);
-
-	return ret;
-}
-
 /* Make generic code ignore STT_REGISTER dummy undefined symbols. */
 int module_frob_arch_sections(Elf_Ehdr *hdr,
 			      Elf_Shdr *sechdrs,
arch/sparc/mm/Makefile
@@ -14,3 +14,5 @@ obj-$(CONFIG_SPARC32)   += leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+obj-$(CONFIG_EXECMEM) += execmem.o
arch/sparc/mm/execmem.c (new file)
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/execmem.h>
+
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = MODULES_VADDR,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
arch/sparc/net/bpf_jit_comp_32.c
@@ -1,10 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/moduleloader.h>
 #include <linux/workqueue.h>
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/cache.h>
 #include <linux/if_vlan.h>
+#include <linux/execmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
@@ -713,7 +713,7 @@ cond_branch:			f_offset = addrs[i + filter[i].jf];
 				if (unlikely(proglen + ilen > oldproglen)) {
 					pr_err("bpb_jit_compile fatal error\n");
 					kfree(addrs);
-					module_memfree(image);
+					execmem_free(image);
 					return;
 				}
 				memcpy(image + proglen, temp, ilen);
@@ -736,7 +736,7 @@ cond_branch:			f_offset = addrs[i + filter[i].jf];
 			break;
 		}
 		if (proglen == oldproglen) {
-			image = module_alloc(proglen);
+			image = execmem_alloc(EXECMEM_BPF, proglen);
 			if (!image)
 				goto out;
 		}
@@ -758,7 +758,7 @@ out:
 void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
-		module_memfree(fp->bpf_func);
+		execmem_free(fp->bpf_func);
 
 	bpf_prog_unlock_free(fp);
 }
arch/x86/Kconfig
@@ -34,6 +34,7 @@ config X86_64
 	select SWIOTLB
 	select ARCH_HAS_ELFCORE_COMPAT
 	select ZONE_DMA32
+	select EXECMEM if DYNAMIC_FTRACE
 
 config FORCE_DYNAMIC_FTRACE
 	def_bool y
arch/x86/kernel/ftrace.c
@@ -25,6 +25,7 @@
 #include <linux/memory.h>
 #include <linux/vmalloc.h>
 #include <linux/set_memory.h>
+#include <linux/execmem.h>
 
 #include <trace/syscall.h>
 
@@ -260,25 +261,14 @@ void arch_ftrace_update_code(int command)
 /* Currently only x86_64 supports dynamic trampolines */
 #ifdef CONFIG_X86_64
 
-#ifdef CONFIG_MODULES
-#include <linux/moduleloader.h>
-/* Module allocation simplifies allocating memory for code */
 static inline void *alloc_tramp(unsigned long size)
 {
-	return module_alloc(size);
+	return execmem_alloc(EXECMEM_FTRACE, size);
 }
 static inline void tramp_free(void *tramp)
 {
-	module_memfree(tramp);
+	execmem_free(tramp);
 }
-#else
-/* Trampolines can only be created if modules are supported */
-static inline void *alloc_tramp(unsigned long size)
-{
-	return NULL;
-}
-static inline void tramp_free(void *tramp) { }
-#endif
 
 /* Defined as markers to the end of the ftrace default trampolines */
 extern void ftrace_regs_caller_end(void);
arch/x86/kernel/kprobes/core.c
@@ -40,12 +40,12 @@
 #include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/kasan.h>
-#include <linux/moduleloader.h>
 #include <linux/objtool.h>
 #include <linux/vmalloc.h>
 #include <linux/pgtable.h>
 #include <linux/set_memory.h>
 #include <linux/cfi.h>
+#include <linux/execmem.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -495,7 +495,7 @@ void *alloc_insn_page(void)
 {
 	void *page;
 
-	page = module_alloc(PAGE_SIZE);
+	page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
 	if (!page)
 		return NULL;
 
arch/x86/kernel/module.c
@@ -36,57 +36,6 @@ do {							\
 } while (0)
 #endif
 
-#ifdef CONFIG_RANDOMIZE_BASE
-static unsigned long module_load_offset;
-
-/* Mutex protects the module_load_offset. */
-static DEFINE_MUTEX(module_kaslr_mutex);
-
-static unsigned long int get_module_load_offset(void)
-{
-	if (kaslr_enabled()) {
-		mutex_lock(&module_kaslr_mutex);
-		/*
-		 * Calculate the module_load_offset the first time this
-		 * code is called. Once calculated it stays the same until
-		 * reboot.
-		 */
-		if (module_load_offset == 0)
-			module_load_offset =
-				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
-		mutex_unlock(&module_kaslr_mutex);
-	}
-	return module_load_offset;
-}
-#else
-static unsigned long int get_module_load_offset(void)
-{
-	return 0;
-}
-#endif
-
-void *module_alloc(unsigned long size)
-{
-	gfp_t gfp_mask = GFP_KERNEL;
-	void *p;
-
-	if (PAGE_ALIGN(size) > MODULES_LEN)
-		return NULL;
-
-	p = __vmalloc_node_range(size, MODULE_ALIGN,
-				 MODULES_VADDR + get_module_load_offset(),
-				 MODULES_END, gfp_mask, PAGE_KERNEL,
-				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
-				 NUMA_NO_NODE, __builtin_return_address(0));
-
-	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
-		vfree(p);
-		return NULL;
-	}
-
-	return p;
-}
-
 #ifdef CONFIG_X86_32
 int apply_relocate(Elf32_Shdr *sechdrs,
 		   const char *strtab,
arch/x86/mm/init.c
@@ -7,6 +7,7 @@
 #include <linux/swapops.h>
 #include <linux/kmemleak.h>
 #include <linux/sched/task.h>
+#include <linux/execmem.h>
 
 #include <asm/set_memory.h>
 #include <asm/cpu_device_id.h>
@@ -1095,3 +1096,31 @@ unsigned long arch_max_swapfile_size(void)
 	return pages;
 }
 #endif
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	unsigned long start, offset = 0;
+
+	if (kaslr_enabled())
+		offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+
+	start = MODULES_VADDR + offset;
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.flags = EXECMEM_KASAN_SHADOW,
+				.start = start,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = MODULE_ALIGN,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
132
include/linux/execmem.h
Normal file
132
include/linux/execmem.h
Normal file
@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EXECMEM_ALLOC_H
+#define _LINUX_EXECMEM_ALLOC_H
+
+#include <linux/types.h>
+#include <linux/moduleloader.h>
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+		!defined(CONFIG_KASAN_VMALLOC)
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
+/**
+ * enum execmem_type - types of executable memory ranges
+ *
+ * There are several subsystems that allocate executable memory.
+ * Architectures define different restrictions on placement,
+ * permissions, alignment and other parameters for memory that can be used
+ * by these subsystems.
+ * Types in this enum identify subsystems that allocate executable memory
+ * and let architectures define parameters for ranges suitable for
+ * allocations by each subsystem.
+ *
+ * @EXECMEM_DEFAULT: default parameters that would be used for types that
+ * are not explicitly defined.
+ * @EXECMEM_MODULE_TEXT: parameters for module text sections
+ * @EXECMEM_KPROBES: parameters for kprobes
+ * @EXECMEM_FTRACE: parameters for ftrace
+ * @EXECMEM_BPF: parameters for BPF
+ * @EXECMEM_MODULE_DATA: parameters for module data sections
+ * @EXECMEM_TYPE_MAX:
+ */
+enum execmem_type {
+	EXECMEM_DEFAULT,
+	EXECMEM_MODULE_TEXT = EXECMEM_DEFAULT,
+	EXECMEM_KPROBES,
+	EXECMEM_FTRACE,
+	EXECMEM_BPF,
+	EXECMEM_MODULE_DATA,
+	EXECMEM_TYPE_MAX,
+};
+
+/**
+ * enum execmem_range_flags - options for executable memory allocations
+ * @EXECMEM_KASAN_SHADOW: allocate kasan shadow
+ */
+enum execmem_range_flags {
+	EXECMEM_KASAN_SHADOW	= (1 << 0),
+};
+
+/**
+ * struct execmem_range - definition of an address space suitable for code and
+ *			  related data allocations
+ * @start:	address space start
+ * @end:	address space end (inclusive)
+ * @fallback_start: start of the secondary address space range for fallback
+ *                  allocations on architectures that require it
+ * @fallback_end:   end of the secondary address space (inclusive)
+ * @pgprot:	permissions for memory in this address space
+ * @alignment:	alignment required for text allocations
+ * @flags:	options for memory allocations for this range
+ */
+struct execmem_range {
+	unsigned long	start;
+	unsigned long	end;
+	unsigned long	fallback_start;
+	unsigned long	fallback_end;
+	pgprot_t	pgprot;
+	unsigned int	alignment;
+	enum execmem_range_flags flags;
+};
+
+/**
+ * struct execmem_info - architecture parameters for code allocations
+ * @ranges: array of parameter sets defining architecture specific
+ * parameters for executable memory allocations. The ranges that are not
+ * explicitly initialized by an architecture use parameters defined for
+ * @EXECMEM_DEFAULT.
+ */
+struct execmem_info {
+	struct execmem_range	ranges[EXECMEM_TYPE_MAX];
+};
+
+/**
+ * execmem_arch_setup - define parameters for allocations of executable memory
+ *
+ * A hook for architectures to define parameters for allocations of
+ * executable memory. These parameters should be filled into the
+ * @execmem_info structure.
+ *
+ * For architectures that do not implement this method, a default set of
+ * parameters will be used.
+ *
+ * Return: a structure defining architecture parameters and restrictions
+ * for allocations of executable memory
+ */
+struct execmem_info *execmem_arch_setup(void);
+
+/**
+ * execmem_alloc - allocate executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain executable code, either generated or
+ * loaded from kernel modules.
+ *
+ * Allocates memory that will contain data coupled with executable code,
+ * like data sections in kernel modules.
+ *
+ * The memory will have protections defined by architecture for executable
+ * region of the @type.
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc(enum execmem_type type, size_t size);
+
+/**
+ * execmem_free - free executable memory
+ * @ptr: pointer to the memory that should be freed
+ */
+void execmem_free(void *ptr);
+
+#if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
+void execmem_init(void);
+#else
+static inline void execmem_init(void) {}
+#endif
+
+#endif /* _LINUX_EXECMEM_ALLOC_H */
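
Taken together, the header boils down to a two-call contract for consumers: ask for memory of a given execmem_type, hand it back with execmem_free(). A minimal sketch of a subsystem-side user, modeled on the kprobes conversion later in this diff (the demo_* names are hypothetical):

#include <linux/execmem.h>

static void *demo_alloc_insn_page(void)
{
	/* The arch decides where EXECMEM_KPROBES pages land and their pgprot. */
	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
}

static void demo_free_insn_page(void *page)
{
	/* Like vfree(), this must not be called from interrupt context. */
	execmem_free(page);
}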

include/linux/module.h
@@ -605,6 +605,11 @@ static inline bool module_is_live(struct module *mod)
 	return mod->state != MODULE_STATE_GOING;
 }
 
+static inline bool module_is_coming(struct module *mod)
+{
+	return mod->state == MODULE_STATE_COMING;
+}
+
 struct module *__module_text_address(unsigned long addr);
 struct module *__module_address(unsigned long addr);
 bool is_module_address(unsigned long addr);
@@ -857,6 +862,10 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
 	return ptr;
 }
 
+static inline bool module_is_coming(struct module *mod)
+{
+	return false;
+}
 #endif /* CONFIG_MODULES */
 
 #ifdef CONFIG_SYSFS

include/linux/moduleloader.h
@@ -25,13 +25,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
 /* Additional bytes needed by arch in front of individual sections */
 unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
 
-/* Allocator used for allocating struct module, core sections and init
-   sections. Returns NULL on failure. */
-void *module_alloc(unsigned long size);
-
-/* Free memory returned from module_alloc. */
-void module_memfree(void *module_region);
-
 /* Determines if the section name is an init section (that is only used during
  * module loading).
  */
@@ -129,12 +122,4 @@ void module_arch_cleanup(struct module *mod);
 /* Any cleanup before freeing mod->module_init */
 void module_arch_freeing_init(struct module *mod);
 
-#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
-		!defined(CONFIG_KASAN_VMALLOC)
-#include <linux/kasan.h>
-#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
-#else
-#define MODULE_ALIGN PAGE_SIZE
-#endif
-
 #endif

kernel/bpf/Kconfig
@@ -43,7 +43,7 @@ config BPF_JIT
 	bool "Enable BPF Just In Time compiler"
 	depends on BPF
 	depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
-	depends on MODULES
+	select EXECMEM
 	help
 	  BPF programs are normally handled by a BPF interpreter. This option
 	  allows the kernel to generate native code when a program is loaded

kernel/bpf/core.c
@@ -22,7 +22,6 @@
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 #include <linux/random.h>
-#include <linux/moduleloader.h>
 #include <linux/bpf.h>
 #include <linux/btf.h>
 #include <linux/objtool.h>
@@ -38,6 +37,7 @@
 #include <linux/nospec.h>
 #include <linux/bpf_mem_alloc.h>
 #include <linux/memcontrol.h>
+#include <linux/execmem.h>
 
 #include <asm/barrier.h>
 #include <asm/unaligned.h>
@@ -1065,12 +1065,12 @@ void bpf_jit_uncharge_modmem(u32 size)
 
 void *__weak bpf_jit_alloc_exec(unsigned long size)
 {
-	return module_alloc(size);
+	return execmem_alloc(EXECMEM_BPF, size);
 }
 
 void __weak bpf_jit_free_exec(void *addr)
 {
-	module_memfree(addr);
+	execmem_free(addr);
 }
 
 struct bpf_binary_header *
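
Because bpf_jit_alloc_exec() and bpf_jit_free_exec() stay __weak, an architecture can still interpose its own policy on top of execmem. A hypothetical override that only adds a per-image size cap might look like the sketch below; the SZ_128M limit is illustrative, not from this patch.

void *bpf_jit_alloc_exec(unsigned long size)
{
	/* Illustrative arch-specific cap on a single JIT image. */
	if (size > SZ_128M)
		return NULL;

	return execmem_alloc(EXECMEM_BPF, size);
}

void bpf_jit_free_exec(void *addr)
{
	execmem_free(addr);
}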

kernel/kprobes.c
@@ -26,7 +26,6 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/export.h>
-#include <linux/moduleloader.h>
 #include <linux/kallsyms.h>
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
@@ -39,6 +38,7 @@
 #include <linux/jump_label.h>
 #include <linux/static_call.h>
 #include <linux/perf_event.h>
+#include <linux/execmem.h>
 
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -113,17 +113,17 @@ enum kprobe_slot_state {
 void __weak *alloc_insn_page(void)
 {
 	/*
-	 * Use module_alloc() so this page is within +/- 2GB of where the
+	 * Use execmem_alloc() so this page is within +/- 2GB of where the
 	 * kernel image and loaded module images reside. This is required
 	 * for most of the architectures.
 	 * (e.g. x86-64 needs this to handle the %rip-relative fixups.)
 	 */
-	return module_alloc(PAGE_SIZE);
+	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
 }
 
 static void free_insn_page(void *page)
 {
-	module_memfree(page);
+	execmem_free(page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
@@ -1588,7 +1588,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	}
 
 	/* Get module refcount and reject __init functions for loaded modules. */
-	if (*probed_mod) {
+	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
@@ -1603,12 +1603,13 @@ static int check_kprobe_address_safe(struct kprobe *p,
 		 * kprobes in there.
 		 */
 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
-		    (*probed_mod)->state != MODULE_STATE_COMING) {
+		    !module_is_coming(*probed_mod)) {
 			module_put(*probed_mod);
 			*probed_mod = NULL;
 			ret = -ENOENT;
 		}
 	}
+
 out:
 	preempt_enable();
 	jump_label_unlock();
@@ -2488,24 +2489,6 @@ int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
 	return 0;
 }
 
-/* Remove all symbols in given area from kprobe blacklist */
-static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
-{
-	struct kprobe_blacklist_entry *ent, *n;
-
-	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
-		if (ent->start_addr < start || ent->start_addr >= end)
-			continue;
-		list_del(&ent->list);
-		kfree(ent);
-	}
-}
-
-static void kprobe_remove_ksym_blacklist(unsigned long entry)
-{
-	kprobe_remove_area_blacklist(entry, entry + 1);
-}
-
 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
 				   char *type, char *sym)
 {
@@ -2570,6 +2553,25 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 	return ret ? : arch_populate_kprobe_blacklist();
 }
 
+#ifdef CONFIG_MODULES
+/* Remove all symbols in given area from kprobe blacklist */
+static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
+{
+	struct kprobe_blacklist_entry *ent, *n;
+
+	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
+		if (ent->start_addr < start || ent->start_addr >= end)
+			continue;
+		list_del(&ent->list);
+		kfree(ent);
+	}
+}
+
+static void kprobe_remove_ksym_blacklist(unsigned long entry)
+{
+	kprobe_remove_area_blacklist(entry, entry + 1);
+}
+
 static void add_module_kprobe_blacklist(struct module *mod)
 {
 	unsigned long start, end;
@@ -2672,6 +2674,17 @@ static struct notifier_block kprobe_module_nb = {
 	.priority = 0
 };
 
+static int kprobe_register_module_notifier(void)
+{
+	return register_module_notifier(&kprobe_module_nb);
+}
+#else
+static int kprobe_register_module_notifier(void)
+{
+	return 0;
+}
+#endif /* CONFIG_MODULES */
+
 void kprobe_free_init_mem(void)
 {
 	void *start = (void *)(&__init_begin);
@@ -2731,7 +2744,7 @@ static int __init init_kprobes(void)
 	if (!err)
 		err = register_die_notifier(&kprobe_exceptions_nb);
 	if (!err)
-		err = register_module_notifier(&kprobe_module_nb);
+		err = kprobe_register_module_notifier();
 
 	kprobes_initialized = (err == 0);
 	kprobe_sysctls_init();

kernel/module/Kconfig
@@ -2,6 +2,7 @@
 menuconfig MODULES
 	bool "Enable loadable module support"
 	modules
+	select EXECMEM
 	help
 	  Kernel modules are small pieces of compiled code which can
 	  be inserted in the running kernel, rather than being
@@ -392,7 +393,7 @@ config UNUSED_KSYMS_WHITELIST
 	  exported at all times, even in absence of in-tree users. The value to
 	  set here is the path to a text file containing the list of symbols,
 	  one per line. The path can be absolute, or relative to the kernel
-	  source tree.
+	  source or obj tree.
 
 config MODULES_TREE_LOOKUP
 	def_bool y

kernel/module/kallsyms.c
@@ -348,7 +348,7 @@ const char *module_address_lookup(unsigned long addr,
 	}
 	/* Make a copy in here where it's safe */
 	if (ret) {
-		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
+		strscpy(namebuf, ret, KSYM_NAME_LEN);
 		ret = namebuf;
 	}
 	preempt_enable();
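
The strncpy() call here relied on the destination being pre-zeroed to end up NUL-terminated; strscpy() always terminates and reports truncation. A minimal illustration of the difference (sketch only, not from the patch):

char buf[8];

/* strncpy: copies at most 7 bytes, leaves buf[7] alone if src is longer */
strncpy(buf, "0123456789", sizeof(buf) - 1);
buf[sizeof(buf) - 1] = '\0';	/* the caller must terminate manually */

/* strscpy: always NUL-terminates, returns -E2BIG when it truncates */
if (strscpy(buf, "0123456789", sizeof(buf)) == -E2BIG)
	pr_debug("name truncated\n");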

kernel/module/main.c
@@ -57,6 +57,7 @@
 #include <linux/audit.h>
 #include <linux/cfi.h>
 #include <linux/debugfs.h>
+#include <linux/execmem.h>
 #include <uapi/linux/module.h>
 #include "internal.h"
 
@@ -1179,16 +1180,6 @@ resolve_symbol_wait(struct module *mod,
 	return ksym;
 }
 
-void __weak module_memfree(void *module_region)
-{
-	/*
-	 * This memory may be RO, and freeing RO memory in an interrupt is not
-	 * supported by vmalloc.
-	 */
-	WARN_ON(in_interrupt());
-	vfree(module_region);
-}
-
 void __weak module_arch_cleanup(struct module *mod)
 {
 }
@@ -1197,25 +1188,47 @@ void __weak module_arch_freeing_init(struct module *mod)
 {
 }
 
-static bool mod_mem_use_vmalloc(enum mod_mem_type type)
+static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
 {
-	return IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC) &&
-		mod_mem_type_is_core_data(type);
-}
+	unsigned int size = PAGE_ALIGN(mod->mem[type].size);
+	enum execmem_type execmem_type;
+	void *ptr;
 
-static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
-{
-	if (mod_mem_use_vmalloc(type))
-		return vzalloc(size);
-	return module_alloc(size);
-}
+	mod->mem[type].size = size;
 
-static void module_memory_free(void *ptr, enum mod_mem_type type)
-{
-	if (mod_mem_use_vmalloc(type))
-		vfree(ptr);
+	if (mod_mem_type_is_data(type))
+		execmem_type = EXECMEM_MODULE_DATA;
 	else
-		module_memfree(ptr);
+		execmem_type = EXECMEM_MODULE_TEXT;
+
+	ptr = execmem_alloc(execmem_type, size);
+	if (!ptr)
+		return -ENOMEM;
+
+	/*
+	 * The pointer to these blocks of memory are stored on the module
+	 * structure and we keep that around so long as the module is
+	 * around. We only free that memory when we unload the module.
+	 * Just mark them as not being a leak then. The .init* ELF
+	 * sections *do* get freed after boot so we *could* treat them
+	 * slightly differently with kmemleak_ignore() and only grey
+	 * them out as they work as typical memory allocations which
+	 * *do* eventually get freed, but let's just keep things simple
+	 * and avoid *any* false positives.
+	 */
+	kmemleak_not_leak(ptr);
+
+	memset(ptr, 0, size);
+	mod->mem[type].base = ptr;
+
+	return 0;
+}
+
+static void module_memory_free(struct module *mod, enum mod_mem_type type)
+{
+	void *ptr = mod->mem[type].base;
+
+	execmem_free(ptr);
 }
 
 static void free_mod_mem(struct module *mod)
@@ -1229,12 +1242,12 @@ static void free_mod_mem(struct module *mod)
 		/* Free lock-classes; relies on the preceding sync_rcu(). */
 		lockdep_free_key_range(mod_mem->base, mod_mem->size);
 		if (mod_mem->size)
-			module_memory_free(mod_mem->base, type);
+			module_memory_free(mod, type);
 	}
 
 	/* MOD_DATA hosts mod, so free it at last */
 	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
-	module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA);
+	module_memory_free(mod, MOD_DATA);
 }
 
 /* Free a module, remove from lists, etc. */
@@ -1610,13 +1623,6 @@ static void free_modinfo(struct module *mod)
 	}
 }
 
-void * __weak module_alloc(unsigned long size)
-{
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-			NUMA_NO_NODE, __builtin_return_address(0));
-}
-
 bool __weak module_init_section(const char *name)
 {
 	return strstarts(name, ".init");
@@ -2225,7 +2231,6 @@ static int find_module_sections(struct module *mod, struct load_info *info)
 static int move_module(struct module *mod, struct load_info *info)
 {
 	int i;
-	void *ptr;
 	enum mod_mem_type t = 0;
 	int ret = -ENOMEM;
 
@@ -2234,26 +2239,12 @@ static int move_module(struct module *mod, struct load_info *info)
 			mod->mem[type].base = NULL;
 			continue;
 		}
-		mod->mem[type].size = PAGE_ALIGN(mod->mem[type].size);
-		ptr = module_memory_alloc(mod->mem[type].size, type);
-		/*
-		 * The pointer to these blocks of memory are stored on the module
-		 * structure and we keep that around so long as the module is
-		 * around. We only free that memory when we unload the module.
-		 * Just mark them as not being a leak then. The .init* ELF
-		 * sections *do* get freed after boot so we *could* treat them
-		 * slightly differently with kmemleak_ignore() and only grey
-		 * them out as they work as typical memory allocations which
-		 * *do* eventually get freed, but let's just keep things simple
-		 * and avoid *any* false positives.
-		 */
-		kmemleak_not_leak(ptr);
-		if (!ptr) {
+
+		ret = module_memory_alloc(mod, type);
+		if (ret) {
 			t = type;
 			goto out_enomem;
 		}
-		memset(ptr, 0, mod->mem[type].size);
-		mod->mem[type].base = ptr;
 	}
 
 	/* Transfer each section which specifies SHF_ALLOC */
@@ -2296,7 +2287,7 @@ static int move_module(struct module *mod, struct load_info *info)
 	return 0;
 out_enomem:
 	for (t--; t >= 0; t--)
-		module_memory_free(mod->mem[t].base, t);
+		module_memory_free(mod, t);
 	return ret;
 }
 
@@ -2482,9 +2473,9 @@ static void do_free_init(struct work_struct *w)
 
 	llist_for_each_safe(pos, n, list) {
 		initfree = container_of(pos, struct mod_initfree, node);
-		module_memfree(initfree->init_text);
-		module_memfree(initfree->init_data);
-		module_memfree(initfree->init_rodata);
+		execmem_free(initfree->init_text);
+		execmem_free(initfree->init_data);
+		execmem_free(initfree->init_rodata);
 		kfree(initfree);
 	}
 }
@@ -2594,10 +2585,10 @@ static noinline int do_init_module(struct module *mod)
 	 * We want to free module_init, but be aware that kallsyms may be
 	 * walking this with preempt disabled. In all the failure paths, we
 	 * call synchronize_rcu(), but we don't want to slow down the success
-	 * path. module_memfree() cannot be called in an interrupt, so do the
+	 * path. execmem_free() cannot be called in an interrupt, so do the
 	 * work and call synchronize_rcu() in a work queue.
 	 *
-	 * Note that module_alloc() on most architectures creates W+X page
+	 * Note that execmem_alloc() on most architectures creates W+X page
 	 * mappings which won't be cleaned up until do_free_init() runs. Any
 	 * code such as mark_rodata_ro() which depends on those mappings to
 	 * be cleaned up needs to sync with the queued work by invoking

kernel/trace/trace_kprobe.c
@@ -111,6 +111,7 @@ static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
 	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
 }
 
+#ifdef CONFIG_MODULES
 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
 {
 	char *p;
@@ -129,6 +130,12 @@ static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
 
 	return ret;
 }
+#else
+static inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
+{
+	return false;
+}
+#endif
 
 static bool trace_kprobe_is_busy(struct dyn_event *ev)
 {
@@ -670,6 +677,7 @@ end:
 	return ret;
 }
 
+#ifdef CONFIG_MODULES
 /* Module notifier call back, checking event on the module */
 static int trace_kprobe_module_callback(struct notifier_block *nb,
 					unsigned long val, void *data)
@@ -704,6 +712,16 @@ static struct notifier_block trace_kprobe_module_nb = {
 	.notifier_call = trace_kprobe_module_callback,
 	.priority = 1 /* Invoked after kprobe module callback */
 };
+static int trace_kprobe_register_module_notifier(void)
+{
+	return register_module_notifier(&trace_kprobe_module_nb);
+}
+#else
+static int trace_kprobe_register_module_notifier(void)
+{
+	return 0;
+}
+#endif /* CONFIG_MODULES */
 
 static int count_symbols(void *data, unsigned long unused)
 {
@@ -1933,7 +1951,7 @@ static __init int init_kprobe_trace_early(void)
 	if (ret)
 		return ret;
 
-	if (register_module_notifier(&trace_kprobe_module_nb))
+	if (trace_kprobe_register_module_notifier())
 		return -EINVAL;
 
 	return 0;

mm/Kconfig
@@ -1240,6 +1240,9 @@ config LOCK_MM_AND_FIND_VMA
 config IOMMU_MM_DATA
 	bool
 
+config EXECMEM
+	bool
+
 source "mm/damon/Kconfig"
 
 endmenu

mm/Makefile
@@ -133,3 +133,4 @@ obj-$(CONFIG_IO_MAPPING) += io-mapping.o
 obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
 obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
 obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o
+obj-$(CONFIG_EXECMEM) += execmem.o

mm/execmem.c (new file, 143 lines)
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2002 Richard Henderson
+ * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
+ * Copyright (C) 2024 Mike Rapoport IBM.
+ */
+
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/execmem.h>
+#include <linux/moduleloader.h>
+
+static struct execmem_info *execmem_info __ro_after_init;
+static struct execmem_info default_execmem_info __ro_after_init;
+
+static void *__execmem_alloc(struct execmem_range *range, size_t size)
+{
+	bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
+	unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
+	unsigned long start = range->start;
+	unsigned long end = range->end;
+	unsigned int align = range->alignment;
+	pgprot_t pgprot = range->pgprot;
+	void *p;
+
+	if (kasan)
+		vm_flags |= VM_DEFER_KMEMLEAK;
+
+	p = __vmalloc_node_range(size, align, start, end, gfp_flags,
+				 pgprot, vm_flags, NUMA_NO_NODE,
+				 __builtin_return_address(0));
+	if (!p && range->fallback_start) {
+		start = range->fallback_start;
+		end = range->fallback_end;
+		p = __vmalloc_node_range(size, align, start, end, gfp_flags,
+					 pgprot, vm_flags, NUMA_NO_NODE,
+					 __builtin_return_address(0));
+	}
+
+	if (!p) {
+		pr_warn_ratelimited("execmem: unable to allocate memory\n");
+		return NULL;
+	}
+
+	if (kasan && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
+		vfree(p);
+		return NULL;
+	}
+
+	return kasan_reset_tag(p);
+}
+
+void *execmem_alloc(enum execmem_type type, size_t size)
+{
+	struct execmem_range *range = &execmem_info->ranges[type];
+
+	return __execmem_alloc(range, size);
+}
+
+void execmem_free(void *ptr)
+{
+	/*
+	 * This memory may be RO, and freeing RO memory in an interrupt is not
+	 * supported by vmalloc.
+	 */
+	WARN_ON(in_interrupt());
+	vfree(ptr);
+}
+
+static bool execmem_validate(struct execmem_info *info)
+{
+	struct execmem_range *r = &info->ranges[EXECMEM_DEFAULT];
+
+	if (!r->alignment || !r->start || !r->end || !pgprot_val(r->pgprot)) {
+		pr_crit("Invalid parameters for execmem allocator, module loading will fail");
+		return false;
+	}
+
+	return true;
+}
+
+static void execmem_init_missing(struct execmem_info *info)
+{
+	struct execmem_range *default_range = &info->ranges[EXECMEM_DEFAULT];
+
+	for (int i = EXECMEM_DEFAULT + 1; i < EXECMEM_TYPE_MAX; i++) {
+		struct execmem_range *r = &info->ranges[i];
+
+		if (!r->start) {
+			if (i == EXECMEM_MODULE_DATA)
+				r->pgprot = PAGE_KERNEL;
+			else
+				r->pgprot = default_range->pgprot;
+			r->alignment = default_range->alignment;
+			r->start = default_range->start;
+			r->end = default_range->end;
+			r->flags = default_range->flags;
+			r->fallback_start = default_range->fallback_start;
+			r->fallback_end = default_range->fallback_end;
+		}
+	}
+}
+
+struct execmem_info * __weak execmem_arch_setup(void)
+{
+	return NULL;
+}
+
+static void __init __execmem_init(void)
+{
+	struct execmem_info *info = execmem_arch_setup();
+
+	if (!info) {
+		info = execmem_info = &default_execmem_info;
+		info->ranges[EXECMEM_DEFAULT].start = VMALLOC_START;
+		info->ranges[EXECMEM_DEFAULT].end = VMALLOC_END;
+		info->ranges[EXECMEM_DEFAULT].pgprot = PAGE_KERNEL_EXEC;
+		info->ranges[EXECMEM_DEFAULT].alignment = 1;
+	}
+
+	if (!execmem_validate(info))
+		return;
+
+	execmem_init_missing(info);
+
+	execmem_info = info;
+}
+
+#ifdef CONFIG_ARCH_WANTS_EXECMEM_LATE
+static int __init execmem_late_init(void)
+{
+	__execmem_init();
+	return 0;
+}
+core_initcall(execmem_late_init);
+#else
+void __init execmem_init(void)
+{
+	__execmem_init();
+}
+#endif
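
One behavior worth noting in __execmem_alloc() above: the primary window is tried first, and the fallback pair only on failure, with the same pgprot and alignment. Extending the arch-side sketch shown earlier in this section, a range that can spill into a wider window would add just two fields; the values below are illustrative, not from any one architecture's patch.

	[EXECMEM_DEFAULT] = {
		.start		= MODULES_VADDR,
		.end		= MODULES_END,
		/* retried by __execmem_alloc() only when the window above is full */
		.fallback_start	= VMALLOC_START,
		.fallback_end	= VMALLOC_END,
		.pgprot		= PAGE_KERNEL,
		.alignment	= MODULE_ALIGN,
	},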

mm/mm_init.c
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/cma.h>
 #include <linux/crash_dump.h>
+#include <linux/execmem.h>
 #include "internal.h"
 #include "slab.h"
 #include "shuffle.h"
@@ -2793,4 +2794,5 @@ void __init mm_core_init(void)
 	pti_init();
 	kmsan_init_runtime();
 	mm_cache_init();
+	execmem_init();
 }

scripts/Makefile.vmlinux_o
@@ -94,7 +94,7 @@ targets += .vmlinux.objs
 
 ifdef CONFIG_TRIM_UNUSED_KSYMS
 ksym-wl := $(CONFIG_UNUSED_KSYMS_WHITELIST)
-ksym-wl := $(if $(filter-out /%, $(ksym-wl)),$(srctree)/)$(ksym-wl)
+ksym-wl := $(if $(filter-out /%, $(ksym-wl)),$(if $(wildcard $(ksym-wl)),,$(srctree)/))$(ksym-wl)
 modpost-args += -t $(addprefix -u , $(ksym-wl))
 modpost-deps += $(ksym-wl)
 endif