/* MN10300 Process handling code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/reset-regs.h>
#include <asm/gdb-stub.h>
#include "internal.h"

/*
 * return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
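	/* assumption: the switch_to() frame that thread.sp points at keeps
	 * the saved return PC in its fourth word, hence the index 3 below
	 */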
	return ((unsigned long *) tsk->thread.sp)[3];
}

/*
 * power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
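
/* pm_power_off is a hook for platform code to install its own power-down
 * routine; a board driver might do, hypothetically:
 *
 *	static void myboard_power_off(void)
 *	{
 *		writeb(0x01, myboard_pmu_off_reg);	// poke the PMU
 *	}
 *
 *	pm_power_off = myboard_power_off;
 *
 * the myboard_* names above are illustrative, not real MN10300 symbols
 */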

#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * we use this if we don't have any better idle routine
 */
static void default_idle(void)
{
	local_irq_disable();
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}

#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */

/*
 * the idle thread
 * - there's no useful work to be done, so just try to conserve power and have
 *   a low exit latency (ie sit in a loop waiting for somebody to say that
 *   they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		rcu_idle_enter();
		while (!need_resched()) {
			/* no override is ever installed, so fall through to
			 * the configuration's default idle routine */
			void (*idle)(void) = NULL;

			smp_rmb();
			if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
				idle = poll_idle;
#else  /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
				idle = default_idle;
#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
			}
			idle();
		}
		rcu_idle_exit();

		schedule_preempt_disabled();
	}
}

void release_segments(struct mm_struct *mm)
{
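	/* MN10300 keeps no per-mm segment state, so there is nothing to
	 * release here */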
}

void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}

void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}

void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}

void show_regs(struct pt_regs *regs)
{
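	/* register dumping is not implemented for this port */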
}

/*
 * free current thread data structures etc.
 */
void exit_thread(void)
{
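	/* release any lazy FPU state still owned by the exiting task */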
	exit_fpu();
}

void flush_thread(void)
{
	flush_fpu();
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	unlazy_fpu(src);
	*dst = *src;
	return 0;
}

/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12; /* allocate function call ABI slack */

	/* set things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame = c_regs;
	p->thread.a3 = (unsigned long) c_regs;
	p->thread.sp = c_ksp;
	p->thread.wchan = p->thread.pc;
	p->thread.usp = c_usp;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}
	*c_regs = *current_pt_regs();
	if (c_usp)
		c_regs->sp = c_usp;
	c_regs->epsw &= ~EPSW_FE; /* the FPU stays with the parent; the
				   * child faults its state in on first use */

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;
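
	/* for illustration, a userspace thread library would reach this path
	 * with something like (glibc clone() wrapper form; illustrative only):
	 *
	 *	clone(thread_start, child_stack,
	 *	      CLONE_VM | CLONE_SETTLS | ...,
	 *	      arg, &ptid, tls_block, &ctid);
	 */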

	p->thread.pc = (unsigned long) ret_from_fork;

	return 0;
}

unsigned long get_wchan(struct task_struct *p)
{
	return p->thread.wchan;
}