Merge patch series "riscv: SCS support"

Sami Tolvanen <samitolvanen@google.com> says:

This series adds Shadow Call Stack (SCS) support for RISC-V. SCS
uses compiler instrumentation to store return addresses in a
separate shadow stack to protect them against accidental or
malicious overwrites. More information about SCS can be found
here:

  https://clang.llvm.org/docs/ShadowCallStack.html
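
As a rough illustration (not code from this series), the
instrumentation can be modeled in C as a push in each function
prologue and a pop in each epilogue; the comments show roughly the
sequence Clang 17 emits on RISC-V, where the shadow stack pointer
lives in gp:

  /* Conceptual model only, not actual compiler output. */
  static unsigned long shadow_stack[128];
  static unsigned long *scs_sp = shadow_stack;  /* kept in gp on RISC-V */

  #define SCS_PUSH(ra) (*scs_sp++ = (ra))  /* sd ra, 0(gp); addi gp, gp, 8 */
  #define SCS_POP()    (*--scs_sp)         /* addi gp, gp, -8; ld ra, 0(gp) */

  int main(void)
  {
          SCS_PUSH(0x80001234UL);                    /* callee prologue */
          return SCS_POP() == 0x80001234UL ? 0 : 1;  /* epilogue reloads ra */
  }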

Patch 1 is from Deepak, and it simplifies VMAP_STACK overflow
handling by adding support for accessing per-CPU variables
directly in assembly. The patch is included in this series to
make IRQ stack switching cleaner with SCS, and I've simply
rebased it and fixed a couple of minor issues. Patch 2 uses this
functionality to clean up the stack switching by moving duplicate
code into a single function. On RISC-V, the compiler uses the
gp register for storing the current shadow call stack pointer,
which is incompatible with global pointer relaxation. Patch 3
moves global pointer loading into a macro that can be easily
disabled with SCS. Patch 4 implements SCS register loading and
switching and allows the feature to be enabled; patch 5 adds
separate per-CPU IRQ shadow call stacks when CONFIG_IRQ_STACKS is
enabled. Patch 6 fixes the backward-edge CFI test in lkdtm for
RISC-V.

Note that this series requires Clang 17. Earlier Clang versions
support SCS on RISC-V, but use the x18 register instead of gp,
which isn't ideal. gcc has SCS support for arm64, but I'm not
aware of plans to support RISC-V. Once the Zicfiss extension is
ratified, it's probably preferable to use hardware-backed shadow
stacks instead of SCS on hardware that supports the extension,
and we may want to consider implementing CONFIG_DYNAMIC_SCS to
patch between the implementations at runtime (similarly to the
arm64 implementation, which switches to SCS when hardware PAC
support isn't available).

* b4-shazam-merge:
  lkdtm: Fix CFI_BACKWARD on RISC-V
  riscv: Use separate IRQ shadow call stacks
  riscv: Implement Shadow Call Stack
  riscv: Move global pointer loading to a macro
  riscv: Deduplicate IRQ stack switching
  riscv: VMAP_STACK overflow detection thread-safe

Link: https://lore.kernel.org/r/20230927224757.1154247-8-samitolvanen@google.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Commit 24005d184a by Palmer Dabbelt, 2023-11-02 14:05:23 -07:00
16 changed files with 248 additions and 177 deletions

arch/riscv/Kconfig

@@ -49,6 +49,7 @@ config RISCV
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
@@ -175,6 +176,11 @@ config GCC_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=8)
config HAVE_SHADOW_CALL_STACK
def_bool $(cc-option,-fsanitize=shadow-call-stack)
# https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/a484e843e6eeb51f0cb7b8819e50da6d2444d769
depends on $(ld-option,--no-relax-gp)
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8

arch/riscv/Makefile

@@ -55,6 +55,10 @@ endif
endif
endif
ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
KBUILD_LDFLAGS += --no-relax-gp
endif
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima

arch/riscv/include/asm/asm-prototypes.h

@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);
asmlinkage unsigned long get_overflow_stack(void);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void do_irq(struct pt_regs *regs);

arch/riscv/include/asm/asm.h

@@ -82,6 +82,47 @@
.endr
.endm
#ifdef CONFIG_SMP
#ifdef CONFIG_32BIT
#define PER_CPU_OFFSET_SHIFT 2
#else
#define PER_CPU_OFFSET_SHIFT 3
#endif
.macro asm_per_cpu dst sym tmp
REG_L \tmp, TASK_TI_CPU_NUM(tp)
slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)
la \dst, \sym
add \dst, \dst, \tmp
.endm
#else /* CONFIG_SMP */
.macro asm_per_cpu dst sym tmp
la \dst, \sym
.endm
#endif /* CONFIG_SMP */
.macro load_per_cpu dst ptr tmp
asm_per_cpu \dst \ptr \tmp
REG_L \dst, 0(\dst)
.endm
#ifdef CONFIG_SHADOW_CALL_STACK
/* gp is used as the shadow call stack pointer instead */
.macro load_global_pointer
.endm
#else
/* load __global_pointer to gp */
.macro load_global_pointer
.option push
.option norelax
la gp, __global_pointer$
.option pop
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
/* save all GPs except x1 ~ x5 */
.macro save_from_x6_to_x31
REG_S x6, PT_T1(sp)
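
In C terms, asm_per_cpu computes &sym + __per_cpu_offset[thread_info.cpu];
the slli by PER_CPU_OFFSET_SHIFT only scales the CPU number into the
pointer-sized offset table, i.e. ordinary array indexing. A standalone
sketch, with made-up offsets and symbol address:

  #include <stdio.h>

  /* Hypothetical per-CPU offsets; the kernel fills this table at boot. */
  static unsigned long __per_cpu_offset[4] = {
          0x0, 0x20000, 0x40000, 0x60000,
  };

  /* The la/slli/add/REG_L sequence of asm_per_cpu, as a C function. */
  static unsigned long asm_per_cpu(unsigned long sym, int cpu)
  {
          return sym + __per_cpu_offset[cpu];
  }

  int main(void)
  {
          unsigned long overflow_stack = 0x80200000UL;  /* hypothetical */

          for (int cpu = 0; cpu < 4; cpu++)
                  printf("cpu%d: %#lx\n", cpu,
                         asm_per_cpu(overflow_stack, cpu));
          return 0;
  }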

arch/riscv/include/asm/irq_stack.h

@@ -12,6 +12,9 @@
DECLARE_PER_CPU(ulong *, irq_stack_ptr);
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
void (*func)(struct pt_regs *));
#ifdef CONFIG_VMAP_STACK
/*
* To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd

arch/riscv/include/asm/scs.h

@@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCS_H
#define _ASM_SCS_H
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
#ifdef CONFIG_SHADOW_CALL_STACK
/* Load init_shadow_call_stack to gp. */
.macro scs_load_init_stack
la gp, init_shadow_call_stack
XIP_FIXUP_OFFSET gp
.endm
/* Load the per-CPU IRQ shadow call stack to gp. */
.macro scs_load_irq_stack tmp
load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
.endm
/* Load task_scs_sp(current) to gp. */
.macro scs_load_current
REG_L gp, TASK_TI_SCS_SP(tp)
.endm
/* Load task_scs_sp(current) to gp, but only if tp has changed. */
.macro scs_load_current_if_task_changed prev
beq \prev, tp, _skip_scs
scs_load_current
_skip_scs:
.endm
/* Save gp to task_scs_sp(current). */
.macro scs_save_current
REG_S gp, TASK_TI_SCS_SP(tp)
.endm
#else /* CONFIG_SHADOW_CALL_STACK */
.macro scs_load_init_stack
.endm
.macro scs_load_irq_stack tmp
.endm
.macro scs_load_current
.endm
.macro scs_load_current_if_task_changed prev
.endm
.macro scs_save_current
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCS_H */

arch/riscv/include/asm/thread_info.h

@@ -34,9 +34,6 @@
#ifndef __ASSEMBLY__
extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
extern unsigned long spin_shadow_stack;
#include <asm/processor.h>
#include <asm/csr.h>
@@ -60,8 +57,20 @@ struct thread_info {
long user_sp; /* User stack pointer */
int cpu;
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
#ifdef CONFIG_SHADOW_CALL_STACK
void *scs_base;
void *scs_sp;
#endif
};
#ifdef CONFIG_SHADOW_CALL_STACK
#define INIT_SCS \
.scs_base = init_shadow_call_stack, \
.scs_sp = init_shadow_call_stack,
#else
#define INIT_SCS
#endif
/*
* macros/functions for gaining access to the thread information structure
*
@@ -71,6 +80,7 @@ struct thread_info {
{ \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
INIT_SCS \
}
void arch_release_task_struct(struct task_struct *tsk);

arch/riscv/kernel/asm-offsets.c

@@ -14,6 +14,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/stacktrace.h>
#include <asm/suspend.h>
void asm_offsets(void);
@@ -38,7 +39,11 @@ void asm_offsets(void)
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
#ifdef CONFIG_SHADOW_CALL_STACK
OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
@@ -479,4 +484,8 @@ void asm_offsets(void)
OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);
DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
OFFSET(STACKFRAME_FP, stackframe, fp);
OFFSET(STACKFRAME_RA, stackframe, ra);
}
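
The TASK_TI_* constants emitted here are plain structure offsets; with
simplified stand-in types (not the real kernel definitions),
TASK_TI_SCS_SP is just offsetof(struct task_struct, thread_info.scs_sp),
which is what lets scs.h write `REG_L gp, TASK_TI_SCS_SP(tp)` for
`gp = current->thread_info.scs_sp` (tp holds current on RISC-V):

  #include <stddef.h>
  #include <stdio.h>

  /* Stand-in structures, stripped down to the fields used here. */
  struct thread_info { void *scs_base; void *scs_sp; };
  struct task_struct { struct thread_info thread_info; };

  int main(void)
  {
          printf("TASK_TI_SCS_SP = %zu\n",
                 offsetof(struct task_struct, thread_info.scs_sp));
          return 0;
  }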

arch/riscv/kernel/entry.S

@@ -9,10 +9,13 @@
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>
.section .irqentry.text, "ax"
@@ -75,10 +78,11 @@ _save_context:
csrw CSR_SCRATCH, x0
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/* Load the kernel shadow call stack pointer if coming from userspace */
scs_load_current_if_task_changed s5
move a0, sp /* pt_regs */
la ra, ret_from_exception
@@ -125,6 +129,9 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
addi s0, sp, PT_SIZE_ON_STACK
REG_S s0, TASK_TI_KERNEL_SP(tp)
/* Save the kernel shadow call stack pointer */
scs_save_current
/*
* Save TP into the scratch register, so we can find the kernel data
* structures again.
@@ -172,67 +179,15 @@ SYM_CODE_END(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
/*
* Takes the pseudo-spinlock for the shadow stack, in case multiple
* harts are concurrently overflowing their kernel stacks. We could
* store any value here, but since we're overflowing the kernel stack
* already we only have SP to use as a scratch register. So we just
* swap in the address of the spinlock, as that's definitely non-zero.
*
* Pairs with a store_release in handle_bad_stack().
*/
1: la sp, spin_shadow_stack
REG_AMOSWAP_AQ sp, sp, (sp)
bnez sp, 1b
/* we reach here from kernel context, sscratch must be 0 */
csrrw x31, CSR_SCRATCH, x31
asm_per_cpu sp, overflow_stack, x31
li x31, OVERFLOW_STACK_SIZE
add sp, sp, x31
/* zero out x31 again and restore x31 */
xor x31, x31, x31
csrrw x31, CSR_SCRATCH, x31
la sp, shadow_stack
addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
//save caller register to shadow stack
addi sp, sp, -(PT_SIZE_ON_STACK)
REG_S x1, PT_RA(sp)
REG_S x5, PT_T0(sp)
REG_S x6, PT_T1(sp)
REG_S x7, PT_T2(sp)
REG_S x10, PT_A0(sp)
REG_S x11, PT_A1(sp)
REG_S x12, PT_A2(sp)
REG_S x13, PT_A3(sp)
REG_S x14, PT_A4(sp)
REG_S x15, PT_A5(sp)
REG_S x16, PT_A6(sp)
REG_S x17, PT_A7(sp)
REG_S x28, PT_T3(sp)
REG_S x29, PT_T4(sp)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)
la ra, restore_caller_reg
tail get_overflow_stack
restore_caller_reg:
//save per-cpu overflow stack
REG_S a0, -8(sp)
//restore caller register from shadow_stack
REG_L x1, PT_RA(sp)
REG_L x5, PT_T0(sp)
REG_L x6, PT_T1(sp)
REG_L x7, PT_T2(sp)
REG_L x10, PT_A0(sp)
REG_L x11, PT_A1(sp)
REG_L x12, PT_A2(sp)
REG_L x13, PT_A3(sp)
REG_L x14, PT_A4(sp)
REG_L x15, PT_A5(sp)
REG_L x16, PT_A6(sp)
REG_L x17, PT_A7(sp)
REG_L x28, PT_T3(sp)
REG_L x29, PT_T4(sp)
REG_L x30, PT_T5(sp)
REG_L x31, PT_T6(sp)
//load per-cpu overflow stack
REG_L sp, -8(sp)
addi sp, sp, -(PT_SIZE_ON_STACK)
//save context to overflow stack
@@ -270,6 +225,43 @@ SYM_CODE_START(ret_from_fork)
tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)
#ifdef CONFIG_IRQ_STACKS
/*
* void call_on_irq_stack(struct pt_regs *regs,
* void (*func)(struct pt_regs *));
*
* Calls func(regs) using the per-CPU IRQ stack.
*/
SYM_FUNC_START(call_on_irq_stack)
/* Create a frame record to save ra and s0 (fp) */
addi sp, sp, -STACKFRAME_SIZE_ON_STACK
REG_S ra, STACKFRAME_RA(sp)
REG_S s0, STACKFRAME_FP(sp)
addi s0, sp, STACKFRAME_SIZE_ON_STACK
/* Switch to the per-CPU shadow call stack */
scs_save_current
scs_load_irq_stack t0
/* Switch to the per-CPU IRQ stack and call the handler */
load_per_cpu t0, irq_stack_ptr, t1
li t1, IRQ_STACK_SIZE
add sp, t0, t1
jalr a1
/* Switch back to the thread shadow call stack */
scs_load_current
/* Switch back to the thread stack and restore ra and s0 */
addi sp, s0, -STACKFRAME_SIZE_ON_STACK
REG_L ra, STACKFRAME_RA(sp)
REG_L s0, STACKFRAME_FP(sp)
addi sp, sp, STACKFRAME_SIZE_ON_STACK
ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */
/*
* Integer register context switch
* The callee-saved registers must be saved and restored.
@@ -299,6 +291,8 @@ SYM_FUNC_START(__switch_to)
REG_S s9, TASK_THREAD_S9_RA(a3)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
/* Save the kernel shadow call stack pointer */
scs_save_current
/* Restore context from next->thread */
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
@@ -316,6 +310,8 @@ SYM_FUNC_START(__switch_to)
REG_L s11, TASK_THREAD_S11_RA(a4)
/* The offset of thread_info in task_struct is zero. */
move tp, a1
/* Switch to the next shadow call stack */
scs_load_current
ret
SYM_FUNC_END(__switch_to)

arch/riscv/kernel/head.S

@@ -14,6 +14,7 @@
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/scs.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"
@@ -110,10 +111,7 @@ relocate_enable_mmu:
csrw CSR_TVEC, a0
/* Reload the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/*
* Switch to kernel page tables. A full fence is necessary in order to
@@ -134,10 +132,7 @@ secondary_start_sbi:
csrw CSR_IP, zero
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@@ -159,6 +154,7 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a3
add a3, a3, a1
REG_L sp, (a3)
scs_load_current
.Lsecondary_start_common:
@@ -228,10 +224,7 @@ pmp_done:
#endif /* CONFIG_RISCV_M_MODE */
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@@ -298,6 +291,7 @@ clear_bss_done:
la sp, init_thread_union + THREAD_SIZE
XIP_FIXUP_OFFSET sp
addi sp, sp, -PT_SIZE_ON_STACK
scs_load_init_stack
#ifdef CONFIG_BUILTIN_DTB
la a0, __dtb_start
XIP_FIXUP_OFFSET a0
@@ -316,6 +310,7 @@ clear_bss_done:
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
addi sp, sp, -PT_SIZE_ON_STACK
scs_load_current
#ifdef CONFIG_KASAN
call kasan_early_init

arch/riscv/kernel/irq.c

@@ -9,6 +9,7 @@
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/scs.h>
#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/smp.h>
@@ -34,6 +35,24 @@ EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
#ifdef CONFIG_IRQ_STACKS
#include <asm/irq_stack.h>
DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
#endif
static void init_irq_scs(void)
{
int cpu;
if (!scs_is_enabled())
return;
for_each_possible_cpu(cpu)
per_cpu(irq_shadow_call_stack_ptr, cpu) =
scs_alloc(cpu_to_node(cpu));
}
DEFINE_PER_CPU(ulong *, irq_stack_ptr);
#ifdef CONFIG_VMAP_STACK
@@ -61,40 +80,22 @@ static void init_irq_stacks(void)
#endif /* CONFIG_VMAP_STACK */
#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
static void ___do_softirq(struct pt_regs *regs)
{
__do_softirq();
}
void do_softirq_own_stack(void)
{
#ifdef CONFIG_IRQ_STACKS
if (on_thread_stack()) {
ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
+ IRQ_STACK_SIZE/sizeof(ulong);
__asm__ __volatile(
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" ra, (sp) \n"
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" s0, (sp) \n"
"addi s0, sp, 2*"RISCV_SZPTR "\n"
"move sp, %[sp] \n"
"call __do_softirq \n"
"addi sp, s0, -2*"RISCV_SZPTR"\n"
REG_L" s0, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
REG_L" ra, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
:
: [sp] "r" (sp)
: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3", "t4", "t5", "t6",
#ifndef CONFIG_FRAME_POINTER
"s0",
#endif
"memory");
} else
#endif
if (on_thread_stack())
call_on_irq_stack(NULL, ___do_softirq);
else
__do_softirq();
}
#endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
#else
static void init_irq_scs(void) {}
static void init_irq_stacks(void) {}
#endif /* CONFIG_IRQ_STACKS */
@@ -106,6 +107,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
void __init init_IRQ(void)
{
init_irq_scs();
init_irq_stacks();
irqchip_init();
if (!handle_arch_irq)

arch/riscv/kernel/suspend_entry.S

@@ -61,10 +61,7 @@ END(__cpu_suspend_enter)
SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
#ifdef CONFIG_MMU
/* Save A0 and A1 */

arch/riscv/kernel/traps.c

@@ -352,34 +352,10 @@ static void noinstr handle_riscv_irq(struct pt_regs *regs)
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
#ifdef CONFIG_IRQ_STACKS
if (on_thread_stack()) {
ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
+ IRQ_STACK_SIZE/sizeof(ulong);
__asm__ __volatile(
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" ra, (sp) \n"
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" s0, (sp) \n"
"addi s0, sp, 2*"RISCV_SZPTR "\n"
"move sp, %[sp] \n"
"move a0, %[regs] \n"
"call handle_riscv_irq \n"
"addi sp, s0, -2*"RISCV_SZPTR"\n"
REG_L" s0, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
REG_L" ra, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
:
: [sp] "r" (sp), [regs] "r" (regs)
: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3", "t4", "t5", "t6",
#ifndef CONFIG_FRAME_POINTER
"s0",
#endif
"memory");
} else
#endif
if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
call_on_irq_stack(regs, handle_riscv_irq);
else
handle_riscv_irq(regs);
irqentry_exit(regs, state);
@@ -402,48 +378,14 @@ int is_valid_bugaddr(unsigned long pc)
#endif /* CONFIG_GENERIC_BUG */
#ifdef CONFIG_VMAP_STACK
/*
* Extra stack space that allows us to provide panic messages when the kernel
* has overflowed its stack.
*/
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
overflow_stack)__aligned(16);
/*
* A temporary stack for use by handle_kernel_stack_overflow. This is used so
* we can call into C code to get the per-hart overflow stack. Usage of this
* stack must be protected by spin_shadow_stack.
*/
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
/*
* A pseudo spinlock to protect the shadow stack from being used by multiple
* harts concurrently. This isn't a real spinlock because the lock side must
* be taken without a valid stack and only a single register, it's only taken
* while in the process of panicing anyway so the performance and error
* checking a proper spinlock gives us doesn't matter.
*/
unsigned long spin_shadow_stack;
asmlinkage unsigned long get_overflow_stack(void)
{
return (unsigned long)this_cpu_ptr(overflow_stack) +
OVERFLOW_STACK_SIZE;
}
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
/*
* We're done with the shadow stack by this point, as we're on the
* overflow stack. Tell any other concurrent overflowing harts that
* they can proceed with panicing by releasing the pseudo-spinlock.
*
* This pairs with an amoswap.aq in handle_kernel_stack_overflow.
*/
smp_store_release(&spin_shadow_stack, 0);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n");

arch/riscv/kernel/vdso/Makefile

@@ -36,7 +36,7 @@ CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
endif
# Disable -pg to prevent insert call site
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n

arch/riscv/purgatory/Makefile

@@ -85,6 +85,10 @@ ifdef CONFIG_RELOCATABLE
PURGATORY_CFLAGS_REMOVE += -fPIE
endif
ifdef CONFIG_SHADOW_CALL_STACK
PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_SCS)
endif
CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)

drivers/misc/lkdtm/cfi.c

@@ -68,12 +68,20 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
#define no_pac_addr(addr) \
((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
#ifdef CONFIG_RISCV
/* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#frame-pointer-convention */
#define FRAME_RA_OFFSET (-1)
#else
#define FRAME_RA_OFFSET 1
#endif
/* The ultimate ROP gadget. */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
unsigned long * volatile *ret_addr =
(unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)
@@ -88,7 +96,8 @@ static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
unsigned long * volatile *ret_addr =
(unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)
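
For context, the frame-record access this test performs can be
sketched in user space; it assumes frame pointers are enabled
(compile with -fno-omit-frame-pointer) and is compiler- and
ABI-dependent, which is exactly why FRAME_RA_OFFSET differs per
architecture:

  #include <stdio.h>

  /* On RISC-V the frame pointer addresses the top of the frame record,
   * so the saved ra sits one word below it; on the other architectures
   * handled by lkdtm it sits one word above. Demonstration only, not a
   * stable interface. */
  #ifdef __riscv
  #define FRAME_RA_OFFSET (-1)
  #else
  #define FRAME_RA_OFFSET 1
  #endif

  static __attribute__((noinline)) void show_return_addr(void)
  {
          unsigned long * volatile *ret_addr =
                  (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;

          printf("saved ra %p, builtin %p\n", (void *)*ret_addr,
                 __builtin_return_address(0));
  }

  int main(void)
  {
          show_return_addr();
          return 0;
  }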