Cover kernel addresses above 0x90000000 with the shadow map. Enable HAVE_ARCH_KASAN when the MMU is enabled. Provide kasan_early_init, which fills the shadow map with writable copies of kasan_zero_page. Call kasan_early_init right after MMU initialization in setup_arch. Provide kasan_init, which allocates proper shadow-map pages from the memblock allocator and installs them in the shadow map for addresses from the VMALLOC area to the end of KSEG. Call kasan_init right after memblock initialization. Don't use KASAN for the boot code, the MMU and KASAN initialization, or the page fault handler. Make the kernel stack size 4 times larger when KASAN is enabled to avoid stack overflows. GCC 7.3, 8, or newer is required to build the xtensa kernel with KASAN. Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
38 lines
823 B
C
38 lines
823 B
C
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Xtensa KASAN (Kernel Address Sanitizer) layout definitions.
 *
 * With CONFIG_KASAN enabled, this header defines the shadow-map layout
 * constants and declares the initialization entry points implemented in
 * arch code.  Without CONFIG_KASAN, it provides empty inline stubs so
 * callers need no #ifdefs.
 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifndef __ASSEMBLY__

#ifdef CONFIG_KASAN

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/kmem_layout.h>

/* Start of area covered by KASAN */
#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
/* Start of the shadow map: placed right after the page table area */
#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
/*
 * Size of the shadow map: one shadow byte per KASAN_SHADOW_SCALE granule
 * for the [KASAN_START_VADDR, 4 GiB) range (-KASAN_START_VADDR is the
 * size of that range in unsigned arithmetic).
 */
#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
/* Offset for mem-to-shadow address transformation */
#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)

/* Map the whole shadow region to writable copies of kasan_zero_page */
void __init kasan_early_init(void);
/* Allocate real shadow pages from memblock and install them */
void __init kasan_init(void);

#else

/* KASAN disabled: no-op stubs so call sites stay #ifdef-free */
static inline void kasan_early_init(void)
{
}

static inline void kasan_init(void)
{
}

#endif

#endif
#endif