55ccf3fe3f
Historical prepare_to_copy() is mostly a no-op, duplicated for majority of the architectures and the rest following the x86 model of flushing the extended register state like fpu there. Remove it and use the arch_dup_task_struct() instead. Suggested-by: Oleg Nesterov <oleg@redhat.com> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Link: http://lkml.kernel.org/r/1336692811-30576-1-git-send-email-suresh.b.siddha@intel.com Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: David Howells <dhowells@redhat.com> Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Paul Mundt <lethal@linux-sh.org> Cc: Chris Zankel <chris@zankel.net> Cc: Richard Henderson <rth@twiddle.net> Cc: Russell King <linux@arm.linux.org.uk> Cc: Haavard Skinnemoen <hskinnemoen@gmail.com> Cc: Mike Frysinger <vapier@gentoo.org> Cc: Mark Salter <msalter@redhat.com> Cc: Aurelien Jacquiot <a-jacquiot@ti.com> Cc: Mikael Starvik <starvik@axis.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: Richard Kuo <rkuo@codeaurora.org> Cc: Tony Luck <tony.luck@intel.com> Cc: Michal Simek <monstr@monstr.eu> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Jonas Bonn <jonas@southpole.se> Cc: James E.J. Bottomley <jejb@parisc-linux.org> Cc: Helge Deller <deller@gmx.de> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Chen Liqin <liqin.chen@sunplusct.com> Cc: Lennox Wu <lennox.wu@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Jeff Dike <jdike@addtoit.com> Cc: Richard Weinberger <richard@nod.at> Cc: Guan Xuetao <gxt@mprc.pku.edu.cn> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
111 lines
2.5 KiB
C
111 lines
2.5 KiB
C
#include <linux/mm.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/sched.h>
|
|
|
|
/* Slab cache for per-task extended (FPU) register state; NULL when the CPU
 * has no xstate to save (xstate_size == 0), see arch_task_cache_init(). */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size in bytes of one task's extended state; selected at boot by
 * init_thread_xstate() (hard FPU, soft FPU emulation, or 0 for none). */
unsigned int xstate_size;
/*
|
|
* this gets called so that we can store lazy state into memory and copy the
|
|
* current task into the new thread.
|
|
*/
|
|
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
|
{
|
|
#ifdef CONFIG_SUPERH32
|
|
unlazy_fpu(src, task_pt_regs(src));
|
|
#endif
|
|
*dst = *src;
|
|
|
|
if (src->thread.xstate) {
|
|
dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
|
|
GFP_KERNEL);
|
|
if (!dst->thread.xstate)
|
|
return -ENOMEM;
|
|
memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
void free_thread_xstate(struct task_struct *tsk)
|
|
{
|
|
if (tsk->thread.xstate) {
|
|
kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
|
|
tsk->thread.xstate = NULL;
|
|
}
|
|
}
|
|
|
|
/*
 * Thread-info allocation.  Two strategies, chosen at compile time:
 * when THREAD_SIZE is smaller than a page, pack thread_info structs
 * into a dedicated slab cache; otherwise allocate whole pages.
 */
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

/* Allocate a thread_info from the slab cache, preferring NUMA @node. */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero-fill so stack-usage accounting can find the high-water mark. */
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

/* Free @ti, releasing the task's xstate buffer first. */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

/* Create the thread_info slab cache; SLAB_PANIC — boot fails on OOM. */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
/* Page-granular variant: grab THREAD_SIZE_ORDER pages on NUMA @node. */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero-fill so stack-usage accounting can find the high-water mark. */
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

/* Free @ti's pages, releasing the task's xstate buffer first. */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
void arch_task_cache_init(void)
|
|
{
|
|
if (!xstate_size)
|
|
return;
|
|
|
|
task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
|
|
__alignof__(union thread_xstate),
|
|
SLAB_PANIC | SLAB_NOTRACK, NULL);
|
|
}
|
|
|
|
#ifdef CONFIG_SH_FPU_EMU
|
|
# define HAVE_SOFTFP 1
|
|
#else
|
|
# define HAVE_SOFTFP 0
|
|
#endif
|
|
|
|
void __cpuinit init_thread_xstate(void)
|
|
{
|
|
if (boot_cpu_data.flags & CPU_HAS_FPU)
|
|
xstate_size = sizeof(struct sh_fpu_hard_struct);
|
|
else if (HAVE_SOFTFP)
|
|
xstate_size = sizeof(struct sh_fpu_soft_struct);
|
|
else
|
|
xstate_size = 0;
|
|
}
|