#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/stackprotector.h>

struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

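/*
 * Stack canary value checked by gcc's -fstack-protector instrumentation;
 * exported so that module code can reference the same guard.
 */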
#ifdef CONFIG_CC_STACKPROTECTOR
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

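/*
 * Duplicate the parent's task_struct for fork(), giving the child its own
 * copy of the extended FPU state (xstate) when the parent has one.
 */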
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

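/*
 * Release a task's xstate area back to the slab cache, if it was allocated.
 */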
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

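/*
 * When THREAD_SIZE is smaller than a page, thread_info (and the kernel
 * stack it shares) is allocated from a dedicated slab cache; otherwise
 * whole pages are used.
 */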
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

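/*
 * Create the slab cache backing thread_info allocations; SLAB_PANIC makes
 * a failure to create the cache fatal.
 */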
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

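/*
 * Set up the slab cache used for per-task xstate areas. Skipped entirely
 * when the CPU has no FPU state to save (xstate_size == 0).
 */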
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}

#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif

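/*
 * Size the xstate save area for this CPU: hardware FPU context if the CPU
 * has an FPU, soft-float emulation context if CONFIG_SH_FPU_EMU is enabled,
 * and nothing at all otherwise.
 */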
void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}