x86/fpu: Rename math_state_restore() to fpu__restore()

Move to the new fpu__*() namespace.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar 2015-04-22 13:16:47 +02:00
parent 93b90712c6
commit 3a0aee4801
8 changed files with 11 additions and 11 deletions

View File

@@ -48,7 +48,7 @@ preemption must be disabled around such regions.
 Note, some FPU functions are already explicitly preempt safe. For example,
 kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
-However, math_state_restore must be called with preemption disabled.
+However, fpu__restore() must be called with preemption disabled.
 RULE #3: Lock acquire and release must be performed by same task

View File

@@ -23,7 +23,7 @@ extern void fpstate_init(struct fpu *fpu);
 extern void fpu__flush_thread(struct task_struct *tsk);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
-extern void math_state_restore(void);
+extern void fpu__restore(void);
 extern bool irq_fpu_usable(void);

View File

@@ -228,7 +228,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 }
 /*
- * 'math_state_restore()' saves the current math information in the
+ * 'fpu__restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
@@ -237,7 +237,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
  * Must be called with kernel preemption disabled (eg with local
  * local interrupts as in the case of do_device_not_available).
  */
-void math_state_restore(void)
+void fpu__restore(void)
 {
 	struct task_struct *tsk = current;
@@ -267,7 +267,7 @@ void math_state_restore(void)
 	}
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL_GPL(math_state_restore);
+EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__flush_thread(struct task_struct *tsk)
 {

View File

@@ -404,7 +404,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	set_used_math();
 	if (use_eager_fpu()) {
 		preempt_disable();
-		math_state_restore();
+		fpu__restore();
 		preempt_enable();
 	}

View File

@@ -295,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
 	 * the GDT and LDT are properly updated, and must be
-	 * done before math_state_restore, so the TS bit is up
+	 * done before fpu__restore(), so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

View File

@@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Leave lazy mode, flushing any hypercalls made here. This
 	 * must be done after loading TLS entries in the GDT but before
 	 * loading segments that might reference them, and and it must
-	 * be done before math_state_restore, so the TS bit is up to
+	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

View File

@@ -846,7 +846,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 		return;
 	}
 #endif
-	math_state_restore(); /* interrupts still off */
+	fpu__restore(); /* interrupts still off */
 #ifdef CONFIG_X86_32
 	conditional_sti(regs);
 #endif

View File

@@ -297,12 +297,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	/*
 	 * Similarly, if we took a trap because the Guest used the FPU,
 	 * we have to restore the FPU it expects to see.
-	 * math_state_restore() may sleep and we may even move off to
+	 * fpu__restore() may sleep and we may even move off to
 	 * a different CPU. So all the critical stuff should be done
 	 * before this.
 	 */
 	else if (cpu->regs->trapnum == 7 && !user_has_fpu())
-		math_state_restore();
+		fpu__restore();
 }
/*H:130 /*H:130