#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>

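/*
 * Slab cache for a task's extended register state (FPU state, hardware
 * or software-emulated). xstate_size is 0 on CPUs with no such state,
 * in which case no cache is ever created.
 */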
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

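/*
 * Called on fork: copy the parent's task_struct wholesale, then give the
 * child its own xstate area so parent and child do not share FPU state.
 */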
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

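/* Release a task's xstate area back to the cache, if it has one. */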
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

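/*
 * When THREAD_SIZE is smaller than a page, thread stacks come from a
 * dedicated slab cache so that several stacks can share one page.
 * Otherwise whole pages are handed out by the page allocator directly.
 */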
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

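/*
 * Slab-backed variant. Under CONFIG_DEBUG_STACK_USAGE the stack is
 * zeroed up front so the stack-depth instrumentation can later find
 * the high-water mark by scanning for untouched (zero) bytes.
 */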
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

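/* Tear down the FPU state before handing the stack back to the cache. */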
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

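/*
 * Set up the thread_info cache; SLAB_PANIC makes a failure here fatal
 * at boot, since the kernel cannot run without thread stacks.
 */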
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
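/*
 * Page-backed variant: THREAD_SIZE is at least one page, so allocate
 * 2^THREAD_SIZE_ORDER contiguous pages on the requested node.
 */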
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

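/* As above, but the stack is returned to the page allocator. */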
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

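/*
 * Create the xstate cache once init_thread_xstate() has established the
 * per-task state size; CPUs with no FPU state (xstate_size == 0) skip
 * cache creation entirely.
 */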
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}

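/*
 * HAVE_SOFTFP reflects whether the FPU emulator is built in; defining it
 * as 0 or 1 lets init_thread_xstate() use a plain C conditional (dead
 * code is discarded by the compiler) instead of further #ifdefs.
 */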
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

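/*
 * Probe the boot CPU and record how much per-task FPU state must be
 * kept: the hardware register file, the emulator's soft state, or none.
 */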
void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}