sched/x86: Save [ER]FLAGS on context switch
Effectively reverts commit 2c7577a758 ("sched/x86_64: Don't save flags
on context switch"), specifically because SMAP uses FLAGS.AC, which
invalidates the claim that the kernel has clean flags.

In particular, while preemption from interrupt return is fine (the IRET
frame on the exception stack contains FLAGS), it breaks any code that
does synchronous scheduling, including preempt_enable().

This has become a significant issue ever since commit 5b24a7a2aa ("Add
'unsafe' user access functions for batched accesses") provided a means
of having 'normal' C code between STAC / CLAC, exposing the FLAGS.AC
state. So far this hasn't led to trouble; however, fix it before it
comes apart.

Reported-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@kernel.org
Fixes: 5b24a7a2aa ("Add 'unsafe' user access functions for batched accesses")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 6690e86be8
parent 5e7a8ca319
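To illustrate the hazard described above, a minimal sketch (hypothetical
code, not part of the patch; it assumes the label-based form of the
unsafe accessors current around this patch's timeframe):

	/* Hypothetical batched user copy: everything between
	 * user_access_begin() (STAC) and user_access_end() (CLAC)
	 * runs with EFLAGS.AC set. */
	static int put_two_words(unsigned long __user *uptr, unsigned long val)
	{
		if (!user_access_begin(uptr, 2 * sizeof(long)))	/* STAC */
			return -EFAULT;
		unsafe_put_user(val, uptr, efault);	/* EFLAGS.AC is set here */
		/*
		 * Any synchronous scheduling point in this window -- e.g. a
		 * preempt_enable() that reschedules -- context switches with
		 * AC set, and before this patch __switch_to_asm() lost it.
		 */
		unsafe_put_user(val, uptr + 1, efault);
		user_access_end();			/* CLAC */
		return 0;
	efault:
		user_access_end();
		return -EFAULT;
	}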
arch/x86/entry/entry_32.S:

@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
 	pushl	%ebx
 	pushl	%edi
 	pushl	%esi
+	pushfl
 
 	/* switch stack */
 	movl	%esp, TASK_threadsp(%eax)
@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
 #endif
 
 	/* restore callee-saved registers */
+	popfl
 	popl	%esi
 	popl	%edi
 	popl	%ebx
arch/x86/entry/entry_64.S:

@@ -291,6 +291,7 @@ ENTRY(__switch_to_asm)
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
+	pushfq
 
 	/* switch stack */
 	movq	%rsp, TASK_threadsp(%rdi)
@@ -313,6 +314,7 @@ ENTRY(__switch_to_asm)
 #endif
 
 	/* restore callee-saved registers */
+	popfq
 	popq	%r15
 	popq	%r14
 	popq	%r13
arch/x86/include/asm/switch_to.h:

@@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
  * order of the fields must match the code in __switch_to_asm().
  */
 struct inactive_task_frame {
+	unsigned long flags;
 #ifdef CONFIG_X86_64
 	unsigned long r15;
 	unsigned long r14;
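For orientation, the full structure as it reads after this hunk
(remaining fields reconstructed from the kernel tree around this patch).
The layout mirrors the push order in __switch_to_asm(), which is why
flags, pushed last, must be the first member:

	struct inactive_task_frame {
		unsigned long flags;	/* written by pushfl/pushfq above */
	#ifdef CONFIG_X86_64
		unsigned long r15;
		unsigned long r14;
		unsigned long r13;
		unsigned long r12;
	#else
		unsigned long si;
		unsigned long di;
	#endif
		unsigned long bx;

		/*
		 * These two fields must be together.  They form a stack
		 * frame header, needed by get_frame_pointer().
		 */
		unsigned long bp;
		unsigned long ret_addr;
	};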
arch/x86/kernel/process_32.c:

@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct task_struct *tsk;
 	int err;
 
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
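For reference, X86_EFLAGS_FIXED selects the architecturally always-set
bit 1 of EFLAGS, so a fresh task starts with every status and system
flag (including DF and IF) clear. Its definition in
arch/x86/include/uapi/asm/processor-flags.h:

	#define X86_EFLAGS_FIXED_BIT	1 /* Bit 1 - always on */
	#define X86_EFLAGS_FIXED	_BITUL(X86_EFLAGS_FIXED_BIT)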
arch/x86/kernel/process_64.c:

@@ -392,6 +392,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
+
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
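The container_of() in this hunk works because the fork frame embeds the
switch frame immediately below the new task's saved user registers;
fork_frame is defined alongside inactive_task_frame in switch_to.h:

	struct fork_frame {
		struct inactive_task_frame frame;
		struct pt_regs regs;
	};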