forked from Minki/linux
powerpc: Add ppc_strict_facility_enable boot option
Add a boot option that strictly manages the MSR unavailable bits. This catches kernel uses of FP/Altivec/SPE that would otherwise corrupt user state. Signed-off-by: Anton Blanchard <anton@samba.org> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
parent
dc4fbba11e
commit
3eb5d5888d
@ -2978,6 +2978,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
||||
may be specified.
|
||||
Format: <port>,<port>....
|
||||
|
||||
ppc_strict_facility_enable
|
||||
[PPC] This option catches any kernel use of floating point,
Altivec, VSX and SPE outside of regions specifically
allowed (e.g. enable_kernel_fp()/disable_kernel_fp()).
There is some performance impact when enabling this.
|
||||
|
||||
print-fatal-signals=
|
||||
[KNL] debug: print fatal signals
|
||||
|
||||
|
@ -1214,6 +1214,15 @@ static inline void mtmsr_isync(unsigned long val)
|
||||
: "r" ((unsigned long)(v)) \
|
||||
: "memory")
|
||||
|
||||
extern void msr_check_and_set(unsigned long bits);
|
||||
extern bool strict_msr_control;
|
||||
extern void __msr_check_and_clear(unsigned long bits);
|
||||
static inline void msr_check_and_clear(unsigned long bits)
|
||||
{
|
||||
if (strict_msr_control)
|
||||
__msr_check_and_clear(bits);
|
||||
}
|
||||
|
||||
static inline unsigned long mfvtb (void)
|
||||
{
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
|
@ -4,6 +4,8 @@
|
||||
#ifndef _ASM_POWERPC_SWITCH_TO_H
|
||||
#define _ASM_POWERPC_SWITCH_TO_H
|
||||
|
||||
#include <asm/reg.h>
|
||||
|
||||
struct thread_struct;
|
||||
struct task_struct;
|
||||
struct pt_regs;
|
||||
@ -26,15 +28,15 @@ extern void enable_kernel_spe(void);
|
||||
extern void load_up_spe(struct task_struct *);
|
||||
extern void switch_booke_debug_regs(struct debug_reg *new_debug);
|
||||
|
||||
static inline void disable_kernel_fp(void) { }
|
||||
static inline void disable_kernel_altivec(void) { }
|
||||
static inline void disable_kernel_spe(void) { }
|
||||
static inline void disable_kernel_vsx(void) { }
|
||||
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
extern void flush_fp_to_thread(struct task_struct *);
|
||||
extern void giveup_fpu(struct task_struct *);
|
||||
extern void __giveup_fpu(struct task_struct *);
|
||||
static inline void disable_kernel_fp(void)
|
||||
{
|
||||
msr_check_and_clear(MSR_FP);
|
||||
}
|
||||
|
||||
#else
|
||||
static inline void flush_fp_to_thread(struct task_struct *t) { }
|
||||
static inline void giveup_fpu(struct task_struct *t) { }
|
||||
@ -45,6 +47,10 @@ static inline void __giveup_fpu(struct task_struct *t) { }
|
||||
extern void flush_altivec_to_thread(struct task_struct *);
|
||||
extern void giveup_altivec(struct task_struct *);
|
||||
extern void __giveup_altivec(struct task_struct *);
|
||||
static inline void disable_kernel_altivec(void)
|
||||
{
|
||||
msr_check_and_clear(MSR_VEC);
|
||||
}
|
||||
#else
|
||||
static inline void flush_altivec_to_thread(struct task_struct *t) { }
|
||||
static inline void giveup_altivec(struct task_struct *t) { }
|
||||
@ -53,6 +59,10 @@ static inline void __giveup_altivec(struct task_struct *t) { }
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
extern void flush_vsx_to_thread(struct task_struct *);
|
||||
static inline void disable_kernel_vsx(void)
|
||||
{
|
||||
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
|
||||
}
|
||||
#else
|
||||
static inline void flush_vsx_to_thread(struct task_struct *t)
|
||||
{
|
||||
@ -63,6 +73,10 @@ static inline void flush_vsx_to_thread(struct task_struct *t)
|
||||
extern void flush_spe_to_thread(struct task_struct *);
|
||||
extern void giveup_spe(struct task_struct *);
|
||||
extern void __giveup_spe(struct task_struct *);
|
||||
static inline void disable_kernel_spe(void)
|
||||
{
|
||||
msr_check_and_clear(MSR_SPE);
|
||||
}
|
||||
#else
|
||||
static inline void flush_spe_to_thread(struct task_struct *t) { }
|
||||
static inline void giveup_spe(struct task_struct *t) { }
|
||||
|
@ -87,7 +87,19 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
|
||||
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
|
||||
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
||||
|
||||
static void msr_check_and_set(unsigned long bits)
|
||||
/* True when the ppc_strict_facility_enable boot option is given; makes
 * msr_check_and_clear() actually clear facility bits so stray kernel
 * FP/Altivec/VSX/SPE use traps instead of silently corrupting user state. */
bool strict_msr_control;
|
||||
EXPORT_SYMBOL(strict_msr_control);
||||
static int __init enable_strict_msr_control(char *str)
|
||||
{
|
||||
strict_msr_control = true;
|
||||
pr_info("Enabling strict facility control\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
|
||||
|
||||
void msr_check_and_set(unsigned long bits)
|
||||
{
|
||||
unsigned long oldmsr = mfmsr();
|
||||
unsigned long newmsr;
|
||||
@ -103,7 +115,7 @@ static void msr_check_and_set(unsigned long bits)
|
||||
mtmsr_isync(newmsr);
|
||||
}
|
||||
|
||||
static void msr_check_and_clear(unsigned long bits)
|
||||
void __msr_check_and_clear(unsigned long bits)
|
||||
{
|
||||
unsigned long oldmsr = mfmsr();
|
||||
unsigned long newmsr;
|
||||
@ -118,6 +130,7 @@ static void msr_check_and_clear(unsigned long bits)
|
||||
if (oldmsr != newmsr)
|
||||
mtmsr_isync(newmsr);
|
||||
}
|
||||
EXPORT_SYMBOL(__msr_check_and_clear);
|
||||
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
void giveup_fpu(struct task_struct *tsk)
|
||||
|
Loading…
Reference in New Issue
Block a user