xtensa: add support for KASAN
Cover kernel addresses above 0x90000000 with the shadow map. Enable HAVE_ARCH_KASAN when the MMU is enabled. Provide kasan_early_init, which fills the shadow map with writable copies of kasan_zero_page, and call it right after MMU initialization in init_arch. Provide kasan_init, which allocates proper shadow map pages from the memblock and puts them into the shadow map for addresses from the VMALLOC area to the end of KSEG, and call it right after memblock initialization. Don't instrument the boot code, the MMU and KASAN initialization code, or the page fault handler. Make the kernel stack 4 times larger when KASAN is enabled to avoid stack overflows. GCC 7.3, 8, or newer is required to build the xtensa kernel with KASAN.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
commit c633544a61 (parent 1af1e8a39d)
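The shadow-map geometry can be checked by hand: with the generic KASAN scale of one shadow byte per 8 bytes of memory (KASAN_SHADOW_SCALE_SHIFT = 3), the KASAN_SHADOW_OFFSET of 0x6e400000 selected in Kconfig maps the covered region starting at 0x90000000 onto a shadow map at 0x80400000, right behind the page table, ending at 0x8e400000 as shown in the layout diagrams below. A minimal stand-alone sketch (illustrative only, not part of the patch) that verifies these relations:

/*
 * Hypothetical user-space sketch: checks that the xtensa KASAN constants
 * introduced by this patch are mutually consistent. The shadow scale shift
 * is the generic KASAN value; all other numbers come from the patch.
 */
#include <assert.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_START_VADDR   0x90000000UL /* start of area covered by KASAN */
#define KASAN_SHADOW_START  0x80400000UL /* XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE */
#define KASAN_SHADOW_OFFSET 0x6e400000UL /* CONFIG_KASAN_SHADOW_OFFSET */
#define KASAN_SHADOW_SIZE   ((0x100000000UL - KASAN_START_VADDR) >> KASAN_SHADOW_SCALE_SHIFT)

/* Generic KASAN mem-to-shadow transformation. */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	/* 0x90000000 must map to the first byte of the shadow map. */
	assert(mem_to_shadow(KASAN_START_VADDR) == KASAN_SHADOW_START);

	/* Prints 0x80400000 - 0x8e400000 (224 MB), matching the layout below. */
	printf("shadow: 0x%08lx - 0x%08lx (%lu MB)\n",
	       KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
	       KASAN_SHADOW_SIZE >> 20);
	return 0;
}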
@@ -35,5 +35,5 @@
     |        um: | TODO |
     | unicore32: | TODO |
     |       x86: |  ok  |
-    |    xtensa: | TODO |
+    |    xtensa: |  ok  |
     -----------------------
@@ -71,6 +71,8 @@ Default MMUv2-compatible layout.
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR  0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START      0x80400000  KASAN_SHADOW_SIZE
++------------------+                                      0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START           0xc0000000  128MB - 64KB
 +------------------+  VMALLOC_END
@@ -111,6 +113,8 @@ Default MMUv2-compatible layout.
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR  0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START      0x80400000  KASAN_SHADOW_SIZE
++------------------+                                      0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START           0xa0000000  128MB - 64KB
 +------------------+  VMALLOC_END
@@ -152,6 +156,8 @@ Default MMUv2-compatible layout.
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR  0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START      0x80400000  KASAN_SHADOW_SIZE
++------------------+                                      0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START           0x90000000  128MB - 64KB
 +------------------+  VMALLOC_END
@@ -15,6 +15,7 @@ config XTENSA
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select HAVE_ARCH_KASAN if MMU
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -80,6 +81,10 @@ config VARIANT_IRQ_SWITCH
 config HAVE_XTENSA_GPIO32
 	def_bool n
 
+config KASAN_SHADOW_OFFSET
+	hex
+	default 0x6e400000
+
 menu "Processor type and features"
 
 choice
@@ -15,6 +15,8 @@ CFLAGS_REMOVE_inftrees.o = -pg
 CFLAGS_REMOVE_inffast.o = -pg
 endif
 
+KASAN_SANITIZE := n
+
 CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong
arch/xtensa/include/asm/kasan.h (new file, 37 lines)
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR	__XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START	(XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE	(-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET	__XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
@@ -71,7 +71,11 @@
 
 #endif
 
+#ifndef CONFIG_KASAN
 #define KERNEL_STACK_SHIFT	13
+#else
+#define KERNEL_STACK_SHIFT	15
+#endif
 #define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
 
 #endif
@@ -12,9 +12,9 @@
 #define _XTENSA_PGTABLE_H
 
 #define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 #include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
 
 /*
  * We only use two ring levels, user and kernel space.
@@ -170,6 +170,7 @@
 #define PAGE_SHARED_EXEC \
 	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
 #define PAGE_KERNEL		__pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO		__pgprot(_PAGE_PRESENT)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -108,14 +108,33 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
 
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
 
 /* Don't build bcopy at all ... */
 #define __HAVE_ARCH_BCOPY
 
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
 #endif	/* _XTENSA_STRING_H */
@@ -36,6 +36,7 @@
 #endif
 
 #include <asm/bootparam.h>
+#include <asm/kasan.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -251,6 +252,10 @@ void __init init_arch(bp_tag_t *bp_start)
 
 	init_mmu();
 
+	/* Initialize initial KASAN shadow map */
+
+	kasan_early_init();
+
 	/* Parse boot parameters */
 
 	if (bp_start)
@@ -388,7 +393,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	parse_early_param();
 	bootmem_init();
+	kasan_init();
 	unflatten_and_copy_device_tree();
 
 #ifdef CONFIG_SMP
@@ -41,6 +41,9 @@
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(__strncpy_user);
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
@@ -109,7 +109,8 @@
 	addi	a5, a5, 2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
 
-ENTRY(memcpy)
+ENTRY(__memcpy)
+WEAK(memcpy)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -271,7 +272,7 @@ ENTRY(memcpy)
 	s8i	a6, a5, 0
 	retw
 
-ENDPROC(memcpy)
+ENDPROC(__memcpy)
 
 /*
  * void bcopy(const void *src, void *dest, size_t n);
@@ -376,7 +377,8 @@ ENDPROC(bcopy)
 	j	.Lbackdstaligned	# dst is now aligned,
 					# return to main algorithm
 
-ENTRY(memmove)
+ENTRY(__memmove)
+WEAK(memmove)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -548,4 +550,4 @@ ENTRY(memmove)
 	s8i	a6, a5, 0
 	retw
 
-ENDPROC(memmove)
+ENDPROC(__memmove)
@@ -31,7 +31,8 @@
  */
 
 .text
-ENTRY(memset)
+ENTRY(__memset)
+WEAK(memset)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ c, a4/ length
@@ -140,7 +141,7 @@ EX(10f) s8i a3, a5, 0
 .Lbytesetdone:
 	retw
 
-ENDPROC(memset)
+ENDPROC(__memset)
 
 	.section .fixup, "ax"
 	.align	4
@@ -5,3 +5,8 @@
 obj-y			:= init.o misc.o
 obj-$(CONFIG_MMU)	+= cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM)	+= highmem.o
+obj-$(CONFIG_KASAN)	+= kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
@@ -100,6 +100,9 @@ void __init mem_init(void)
 
 	mem_init_print_info(NULL);
 	pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+		" kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
+#endif
 #ifdef CONFIG_MMU
 		" vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
 #endif
@@ -108,6 +111,10 @@ void __init mem_init(void)
 		" fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
 #endif
 		" lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_KASAN
+		KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+		KASAN_SHADOW_SIZE >> 20,
+#endif
 #ifdef CONFIG_MMU
 		VMALLOC_START, VMALLOC_END,
 		(VMALLOC_END - VMALLOC_START) >> 20,
arch/xtensa/mm/kasan_init.c (new file, 95 lines)
@@ -0,0 +1,95 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+	unsigned long vaddr = KASAN_SHADOW_START;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+		BUG_ON(!pmd_none(*pmd));
+		set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+	}
+	early_trap_init();
+}
+
+static void __init populate(void *start, void *end)
+{
+	unsigned long n_pages = (end - start) / PAGE_SIZE;
+	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+	unsigned long i, j;
+	unsigned long vaddr = (unsigned long)start;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+	pr_debug("%s: %p - %p\n", __func__, start, end);
+
+	for (i = j = 0; i < n_pmds; ++i) {
+		int k;
+
+		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+			phys_addr_t phys =
+				memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+						    MEMBLOCK_ALLOC_ANYWHERE);
+
+			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+		}
+	}
+
+	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+		set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+	local_flush_tlb_all();
+	memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+	int i;
+
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+	/*
+	 * Replace shadow map pages that cover addresses from VMALLOC area
+	 * start to the end of KSEG with clean writable pages.
+	 */
+	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+	/* Write protect kasan_zero_page and zero-initialize it again. */
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+	local_flush_tlb_all();
+	memset(kasan_zero_page, 0, PAGE_SIZE);
+
+	/* At this point kasan is fully initialized. Enable error messages. */
+	current->kasan_depth = 0;
+	pr_info("KernelAddressSanitizer initialized\n");
+}