/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>
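
/*
 * AMR/IAMR mask values used by KUAP/KUEP to lock out userspace access:
 * AMR_KUAP_BLOCKED in the AMR blocks both kernel loads from and stores to
 * user pages, and AMR_KUEP_BLOCKED in the IAMR blocks kernel instruction
 * fetch from user pages, for the protection keys covered by the masks.
 */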
#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

#ifdef __ASSEMBLY__
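
/*
 * Restore the user AMR/IAMR values saved in the interrupt stack frame
 * before returning to userspace.
 */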
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If the kuap feature is not enabled, do the mtspr
	 * only if the AMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If the kuep feature is not enabled, do the mtspr
	 * only if the IAMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm
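
/*
 * Restore the AMR saved on interrupt entry when returning to a kernel
 * context; the IAMR does not need to be restored in that case.
 */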
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see __kuap_kernel_restore()
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
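/*
 * Debug helper: with CONFIG_PPC_KUAP_DEBUG, warn (once) if the AMR does not
 * hold AMR_KUAP_BLOCKED where the kernel expects it to be locked.
 */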
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif

/*
 * if (pkey) {
 *
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED -> IAMR
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *
 * }
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * if both pkey and kuap are disabled, nothing to do
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * if pkey is disabled and we are entering from userspace,
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing AMR outside the kernel
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled, or pkey is disabled but entering from kernel
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * update kernel AMR with AMR_KUAP_BLOCKED only
	 * if the KUAP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * if entering from kernel we don't need to save IAMR
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * update kernel IAMR with AMR_KUEP_BLOCKED only
	 * if the KUEP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * Usage of kthread_use_mm() should inherit the AMR value of the operating
 * address space. But the AMR value is thread-specific and we inherit the
 * address space and not the thread access restrictions. Because of this,
 * ignore the AMR value when accessing userspace via a kernel thread.
 */
static inline u64 current_thread_amr(void)
{
	if (current->thread.regs)
		return current->thread.regs->amr;
	return default_amr;
}

static inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return default_iamr;
}
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static __always_inline bool kuap_is_disabled(void)
{
	return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
}
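
/*
 * When returning to userspace with pkeys in use, restore the AMR/IAMR from
 * the saved pt_regs, skipping the SPR writes when the registers already
 * hold the wanted values.
 */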
static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}

static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
	if (likely(regs->amr == amr))
		return;

	isync();
	mtspr(SPRN_AMR, regs->amr);
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 *
	 * No need to restore IAMR when returning to kernel space.
	 */
}
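
/*
 * Return the current AMR; with CONFIG_PPC_KUAP_DEBUG, also warn if it is
 * not in the locked (AMR_KUAP_BLOCKED) state.
 */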
static inline unsigned long __kuap_get_and_assert_locked(void)
{
	unsigned long amr = mfspr(SPRN_AMR);

	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
	return amr;
}

/* Do nothing, book3s/64 does that in ASM */
static inline void __kuap_lock(void)
{
}

static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
}

/*
 * We support individually allowing read or write, but we don't support nesting
 * because that would require an expensive read/modify/write of the AMR.
 */

static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT)
	 */
	/*
	 * We do have an exception table entry, but accessing
	 * userspace results in a fault. This could be because we
	 * didn't unlock the AMR or access is denied by userspace
	 * using a key value that blocks access. We are only interested
	 * in catching the use case of accessing without unlocking
	 * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
	 */
	if (is_write) {
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
	}
	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}
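
/*
 * Open a user access window for the requested direction(s): drop the
 * corresponding AMR block bits while keeping the thread's own pkey
 * restrictions in place.
 */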
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}

#else /* CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */
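
/*
 * Close the user access window again: relock the AMR and, when the
 * uaccess_flush_key static branch is enabled, run the uaccess flush.
 */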
static inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */