s390/kasan: dynamic shadow mem allocation for modules
Move from preallocating the entire shadow memory for the modules area to
dynamic allocation per module load.

This behavior was introduced for x86 with commit bebf56a1b: "This patch
also forces module_alloc() to return 8*PAGE_SIZE aligned address making
shadow memory handling ( kasan_module_alloc()/kasan_module_free() )
more simple. Such alignment guarantees that each shadow page backing
modules address space correspond to only one module_alloc() allocation"
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
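
The alignment guarantee quoted above follows from the generic KASAN shadow
arithmetic: one shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT = 8 bytes of
memory, so an allocation that starts on an 8 * PAGE_SIZE boundary has a
shadow that starts on a PAGE_SIZE boundary. Below is a minimal userspace
sketch of that arithmetic; the names and the zero shadow offset are
illustrative stand-ins, not the s390 values:

#include <stdio.h>

#define PAGE_SIZE                4096UL
#define KASAN_SHADOW_SCALE_SHIFT 3            /* 1 shadow byte per 8 bytes of memory */
#define MODULE_ALIGN             (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) /* 8 * PAGE_SIZE */
#define SHADOW_OFFSET            0UL          /* illustrative, not the real offset */

/* Shadow byte address for a given memory address (generic KASAN formula). */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	/* Two hypothetical module_alloc() results, both MODULE_ALIGN aligned. */
	unsigned long mod_a = 1 * MODULE_ALIGN;
	unsigned long mod_b = 2 * MODULE_ALIGN;

	/*
	 * Each allocation starts on an 8 * PAGE_SIZE boundary, so its shadow
	 * starts on a PAGE_SIZE boundary and a shadow page never straddles
	 * two module_alloc() allocations.
	 */
	printf("shadow(mod_a) = %#lx, page aligned: %d\n",
	       mem_to_shadow(mod_a), mem_to_shadow(mod_a) % PAGE_SIZE == 0);
	printf("shadow(mod_b) = %#lx, page aligned: %d\n",
	       mem_to_shadow(mod_b), mem_to_shadow(mod_b) % PAGE_SIZE == 0);
	return 0;
}

This is what lets kasan_module_alloc() manage whole shadow pages per module
without sharing them with a neighbouring allocation.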
commit 793213a82d
parent 0dac8f6bc3
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/kasan.h>
 #include <linux/moduleloader.h>
 #include <linux/bug.h>
 #include <asm/alternative.h>
@@ -32,12 +33,18 @@
 
 void *module_alloc(unsigned long size)
 {
+	void *p;
+
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC,
-				    0, NUMA_NO_NODE,
-				    __builtin_return_address(0));
+	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+				 GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				 __builtin_return_address(0));
+	if (p && (kasan_module_alloc(p, size) < 0)) {
+		vfree(p);
+		return NULL;
+	}
+	return p;
 }
 
 void module_arch_freeing_init(struct module *mod)
@@ -214,8 +214,6 @@ void __init kasan_early_init(void)
 
 	memsize = min(max_physmem_end, KASAN_SHADOW_START);
 	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
-	if (IS_ENABLED(CONFIG_MODULES))
-		shadow_alloc_size += MODULES_LEN >> KASAN_SHADOW_SCALE_SHIFT;
 	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
 		initrd_end =
@@ -239,18 +237,15 @@ void __init kasan_early_init(void)
 	 * +- shadow end -+        | mapping        |
 	 * | ... gap ...  |\       | (untracked)    |
 	 * +- modules vaddr -+ \   +----------------+
-	 * | 2Gb             |  \|      256Mb       |
+	 * | 2Gb             |  \|    unmapped      | allocated per module
	 * +-----------------+     +- shadow end ---+
 	 */
 	/* populate identity mapping */
 	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
-	/* populate kasan shadow (for identity mapping / modules / zero page) */
+	/* populate kasan shadow (for identity mapping and zero page mapping) */
 	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
-	if (IS_ENABLED(CONFIG_MODULES)) {
+	if (IS_ENABLED(CONFIG_MODULES))
 		untracked_mem_end = vmax - MODULES_LEN;
-		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
-					     __sha(vmax), POPULATE_MAP);
-	}
 	kasan_early_vmemmap_populate(__sha(memsize), __sha(untracked_mem_end),
 				     POPULATE_ZERO_SHADOW);
 	kasan_set_pgd(early_pg_dir, asce_type);
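
For scale, the shadow that is no longer preallocated at boot is
MODULES_LEN >> KASAN_SHADOW_SCALE_SHIFT. With the 2 GB modules area and the
256 MB shadow block shown in the layout comment above, the arithmetic checks
out as a quick plain-C calculation (figures taken from the diagram, scale
shift 3 as in generic KASAN):

#include <stdio.h>

int main(void)
{
	unsigned long modules_len = 2UL << 30;   /* 2 GB modules area (from the layout comment) */
	unsigned int  shadow_scale_shift = 3;    /* 1 shadow byte per 8 bytes of memory */

	/* Boot-time shadow the old code preallocated for the modules area. */
	unsigned long shadow = modules_len >> shadow_scale_shift;

	printf("%lu MB of shadow now allocated on demand instead of at boot\n",
	       shadow >> 20);
	return 0;
}

With this patch, that shadow is only created when a module is actually
loaded, via the kasan_module_alloc() call in module_alloc() above.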