RISC-V: Add arch functions for non-retentive suspend entry/exit

The hart registers and CSRs are not preserved in the non-retentive
suspend state, so we provide arch-specific helper functions which
will save/restore hart context upon entry/exit to non-retentive
suspend state. These helper functions can be used by cpuidle
drivers for non-retentive suspend entry/exit.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
This commit is contained in:
Anup Patel 2022-02-10 11:19:42 +05:30 committed by Palmer Dabbelt
parent e1de2c93e7
commit 63b13e64a8
No known key found for this signature in database
GPG Key ID: 2E1319F35FBB1889
7 changed files with 279 additions and 21 deletions

View File

@ -67,4 +67,31 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif
#ifdef __ASSEMBLY__
/* Common assembly source macros */
#ifdef CONFIG_XIP_KERNEL
/*
 * XIP_FIXUP_OFFSET: fix up a RAM-resident symbol address in \reg by
 * adding the link-address to physical-RAM delta stored at _xip_fixup.
 * Clobbers t0.
 */
.macro XIP_FIXUP_OFFSET reg
REG_L t0, _xip_fixup
add \reg, \reg, t0
.endm
/*
 * XIP_FIXUP_FLASH_OFFSET: fix up a flash(XIP)-resident symbol address
 * in \reg.  Clobbers t0 and t1.
 *
 * NOTE(review): the result of `and t1, t1, t0` is immediately
 * overwritten by `li t1, XIP_OFFSET`, which leaves the preceding
 * `la`/`and` pair with no effect — confirm this sequence against the
 * intended XIP offset arithmetic.
 */
.macro XIP_FIXUP_FLASH_OFFSET reg
la t1, __data_loc
li t0, XIP_OFFSET_MASK
and t1, t1, t0
li t1, XIP_OFFSET
sub t0, t0, t1
sub \reg, \reg, t0
.endm
/* Link-time fixup constant: PHYS_RAM_BASE - XIP_PHYS_ADDR - XIP_OFFSET */
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
#else
/* Non-XIP kernels need no address fixup; keep the macros as empty no-ops. */
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_ASM_H */

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */
#ifndef _ASM_RISCV_SUSPEND_H
#define _ASM_RISCV_SUSPEND_H
#include <asm/ptrace.h>
/*
 * Hart context saved across a non-retentive suspend state, in which the
 * hardware does not preserve register and CSR contents.
 */
struct suspend_context {
/* Saved and restored by low-level functions */
struct pt_regs regs;
/* Saved and restored by high-level functions */
unsigned long scratch;	/* CSR_SCRATCH */
unsigned long tvec;	/* CSR_TVEC */
unsigned long ie;	/* CSR_IE */
#ifdef CONFIG_MMU
unsigned long satp;	/* CSR_SATP (page-table base), MMU kernels only */
#endif
};
/* Low-level CPU suspend entry function */
int __cpu_suspend_enter(struct suspend_context *context);
/* High-level CPU suspend which will save context and call finish() */
int cpu_suspend(unsigned long arg,
int (*finish)(unsigned long arg,
unsigned long entry,
unsigned long context));
/* Low-level CPU resume entry function */
int __cpu_resume_enter(unsigned long hartid, unsigned long context);
#endif

View File

@ -48,6 +48,8 @@ obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
# Non-retentive suspend support: low-level entry/resume (suspend_entry.S)
# plus high-level CSR save/restore and finisher dispatch (suspend.c)
obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o

View File

@ -13,6 +13,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/suspend.h>
void asm_offsets(void);
@ -113,6 +114,8 @@ void asm_offsets(void)
OFFSET(PT_BADADDR, pt_regs, badaddr);
OFFSET(PT_CAUSE, pt_regs, cause);
OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);

View File

@ -16,27 +16,6 @@
#include <asm/image.h>
#include "efi-header.S"
#ifdef CONFIG_XIP_KERNEL
/* Fix up a RAM-resident symbol address by the XIP link/physical delta.
 * Clobbers t0. */
.macro XIP_FIXUP_OFFSET reg
REG_L t0, _xip_fixup
add \reg, \reg, t0
.endm
/* Fix up a flash(XIP)-resident symbol address.  Clobbers t0 and t1.
 * NOTE(review): t1 is overwritten by `li t1, XIP_OFFSET` right after the
 * `and`, leaving the `la`/`and` pair without effect — confirm intent. */
.macro XIP_FIXUP_FLASH_OFFSET reg
la t1, __data_loc
li t0, XIP_OFFSET_MASK
and t1, t1, t0
li t1, XIP_OFFSET
sub t0, t0, t1
sub \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
#else
/* Non-XIP kernels: empty no-op macros */
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */
__HEAD
ENTRY(_start)
/*

View File

@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#include <linux/ftrace.h>
#include <asm/csr.h>
#include <asm/suspend.h>
/*
 * Save CSRs that hardware does not retain across non-retentive suspend:
 * xSCRATCH, xTVEC, xIE and (MMU kernels) SATP.  The IP CSR is deliberately
 * skipped; see the comment below.
 */
static void suspend_save_csrs(struct suspend_context *context)
{
context->scratch = csr_read(CSR_SCRATCH);
context->tvec = csr_read(CSR_TVEC);
context->ie = csr_read(CSR_IE);
/*
 * No need to save/restore IP CSR (i.e. MIP or SIP) because:
 *
 * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
 * external devices (such as interrupt controller, timer, etc).
 * 2. For MMU (S-mode) kernel, the bits in SIP are set by
 * M-mode firmware and external devices (such as interrupt
 * controller, etc).
 */
#ifdef CONFIG_MMU
context->satp = csr_read(CSR_SATP);
#endif
}
/*
 * Restore the CSRs captured by suspend_save_csrs() after waking up from
 * non-retentive suspend.  Must mirror the save routine exactly.
 */
static void suspend_restore_csrs(struct suspend_context *context)
{
csr_write(CSR_SCRATCH, context->scratch);
csr_write(CSR_TVEC, context->tvec);
csr_write(CSR_IE, context->ie);
#ifdef CONFIG_MMU
csr_write(CSR_SATP, context->satp);
#endif
}
/*
 * High-level non-retentive suspend entry point.
 *
 * @arg:    opaque value forwarded to @finish
 * @finish: platform "finisher" that actually enters the suspend state; it
 *          is called with @arg, the physical resume address, and the
 *          address of the on-stack context.  On success it never returns —
 *          the hart wakes up in __cpu_resume_enter() instead.
 *
 * Returns 0 on successful suspend/resume, -EINVAL for a NULL finisher,
 * the finisher's error code, or -EOPNOTSUPP if the finisher returned 0
 * without suspending.
 */
int cpu_suspend(unsigned long arg,
int (*finish)(unsigned long arg,
unsigned long entry,
unsigned long context))
{
int rc = 0;
struct suspend_context context = { 0 };
/* Finisher should be non-NULL */
if (!finish)
return -EINVAL;
/* Save additional CSRs */
suspend_save_csrs(&context);
/*
 * Function graph tracer state gets inconsistent when the kernel
 * calls functions that never return (aka finishers) hence disable
 * graph tracing during their execution.
 */
pause_graph_tracing();
/* Save context on stack; returns non-zero on the suspend path */
if (__cpu_suspend_enter(&context)) {
/* Call the finisher */
rc = finish(arg, __pa_symbol(__cpu_resume_enter),
(ulong)&context);
/*
 * Should never reach here, unless the suspend finisher
 * fails. Successful cpu_suspend() should return from
 * __cpu_resume_enter()
 */
if (!rc)
rc = -EOPNOTSUPP;
}
/* Enable function graph tracer */
unpause_graph_tracing();
/* Restore additional CSRs */
suspend_restore_csrs(&context);
return rc;
}

View File

@ -0,0 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>
.text
.altmacro
.option norelax
/*
 * int __cpu_suspend_enter(struct suspend_context *context)
 *
 * a0 = context.  Saves ra/sp/gp/tp, s0-s11, a1-a7 and the trap CSRs
 * (EPC/STATUS/TVAL/CAUSE) into context->regs, then returns 1.
 * The saved state is consumed by __cpu_resume_enter, which resumes at
 * the saved ra with a return value of 0 — a setjmp/longjmp-style pair.
 * a0 and t0-t6 are intentionally not preserved.
 */
ENTRY(__cpu_suspend_enter)
/* Save registers (except A0 and T0-T6) */
REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
REG_S gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
REG_S tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
REG_S s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
REG_S s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
REG_S a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
REG_S a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
REG_S a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
REG_S a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
REG_S a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
REG_S a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
REG_S a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
REG_S s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
REG_S s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
REG_S s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
REG_S s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
REG_S s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
REG_S s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
REG_S s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
REG_S s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
REG_S s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
REG_S s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
/* Save CSRs (trap state) via t0 scratch */
csrr t0, CSR_EPC
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
csrr t0, CSR_STATUS
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
csrr t0, CSR_TVAL
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
csrr t0, CSR_CAUSE
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
/* Return non-zero value (suspend path; resume path returns 0) */
li a0, 1
/* Return to C code */
ret
END(__cpu_suspend_enter)
/*
 * __cpu_resume_enter(hartid in a0, context pointer in a1)
 *
 * Wake-up entry point after non-retentive suspend: re-establishes gp,
 * re-enables the MMU (when configured), restores the trap CSRs and the
 * GPRs saved by __cpu_suspend_enter, and returns 0 through the saved ra.
 */
ENTRY(__cpu_resume_enter)
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
#ifdef CONFIG_MMU
/*
 * Save A0 and A1 in T0/T1 across the call.
 * NOTE(review): this assumes relocate_enable_mmu preserves t0/t1,
 * even though t-registers are caller-saved in the RISC-V calling
 * convention — confirm against relocate_enable_mmu's register usage.
 */
add t0, a0, zero
add t1, a1, zero
/* Enable MMU */
la a0, swapper_pg_dir
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
/* Restore A0 and A1 */
add a0, t0, zero
add a1, t1, zero
#endif
/* Make A0 point to suspend context */
add a0, a1, zero
/* Restore trap CSRs saved by __cpu_suspend_enter */
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
csrw CSR_EPC, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
csrw CSR_STATUS, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
csrw CSR_TVAL, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
csrw CSR_CAUSE, t0
/* Restore registers (except A0 and T0-T6) */
REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
/* Return zero value (distinguishes resume from the suspend path's 1) */
add a0, zero, zero
/* Return to C code */
ret
END(__cpu_resume_enter)