The various uses of protect_kernel_linear_mapping_text_rodata() are not
consistent:
  - Its definition depends on "64BIT && !XIP_KERNEL",
  - Its forward declaration depends on MMU,
  - Its single caller depends on "STRICT_KERNEL_RWX && 64BIT && MMU &&
    !XIP_KERNEL".

Fix this by settling on the dependencies of the caller, which can be
simplified, as STRICT_KERNEL_RWX already depends on "MMU && !XIP_KERNEL".
Provide a dummy definition, as the caller is protected by
"IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)" instead of
"#ifdef CONFIG_STRICT_KERNEL_RWX".

Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Alexandre Ghiti <alex@ghiti.fr>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
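A minimal standalone sketch of why the dummy definition is needed: with an
"if (IS_ENABLED(...))" guard, the call site is compiled and type-checked in
every configuration, and the dead branch is only eliminated afterwards, so an
empty stub must be visible when the option is off. The
STRICT_KERNEL_RWX_ENABLED macro and example_setup() wrapper below are
hypothetical stand-ins, not kernel code; in the kernel the condition would be
IS_ENABLED(CONFIG_STRICT_KERNEL_RWX).

/* Hypothetical stand-in for IS_ENABLED(CONFIG_STRICT_KERNEL_RWX). */
#define STRICT_KERNEL_RWX_ENABLED 0

#if STRICT_KERNEL_RWX_ENABLED
/* The real definition would live in a C file built only for this config. */
void protect_kernel_linear_mapping_text_rodata(void);
#else
/* Dummy definition: keeps the always-compiled call below building. */
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
#endif

static void example_setup(void)
{
	/*
	 * Unlike an "#ifdef" block, this branch is parsed even when the
	 * feature is disabled; the constant-false condition (and the empty
	 * call) is then optimized away.
	 */
	if (STRICT_KERNEL_RWX_ENABLED)
		protect_kernel_linear_mapping_text_rodata();
}

int main(void)
{
	example_setup();
	return 0;
}

The header below applies exactly this pattern to
protect_kernel_linear_mapping_text_rodata().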
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 SiFive
 */

#ifndef _ASM_RISCV_SET_MEMORY_H
#define _ASM_RISCV_SET_MEMORY_H

#ifndef __ASSEMBLY__
/*
 * Functions to change memory attributes.
 */
#ifdef CONFIG_MMU
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages);
void protect_kernel_text_data(void);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
void protect_kernel_linear_mapping_text_rodata(void);
#else
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
#endif

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
#define SECTION_ALIGN (1 << 22)
#endif
#else /* !CONFIG_STRICT_KERNEL_RWX */
#define SECTION_ALIGN L1_CACHE_BYTES
#endif /* CONFIG_STRICT_KERNEL_RWX */

#endif /* _ASM_RISCV_SET_MEMORY_H */
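For context, a hedged usage sketch of the set_memory_*() helpers declared
above: example_protect() and its page-aligned buf argument are hypothetical,
but the signatures match the header (address as unsigned long, page count as
int), and on !CONFIG_MMU builds the stub versions simply return 0.

#include <linux/set_memory.h>

/* Hypothetical caller; buf is assumed to point at one page-aligned page. */
static int example_protect(void *buf)
{
	unsigned long addr = (unsigned long)buf;
	int ret;

	/* Drop write and execute permissions on the page... */
	ret = set_memory_ro(addr, 1);
	if (ret)
		return ret;
	ret = set_memory_nx(addr, 1);
	if (ret)
		return ret;

	/* ...and later make it writable again. */
	return set_memory_rw(addr, 1);
}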