/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h> /* for USER_DS macros */
#include <asm/cacheflush.h>

void show_regs(struct pt_regs *regs)
{
	pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
	pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
				regs->r1, regs->r2, regs->r3, regs->r4);
	pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
				regs->r5, regs->r6, regs->r7, regs->r8);
	pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
				regs->r9, regs->r10, regs->r11, regs->r12);
	pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
				regs->r13, regs->r14, regs->r15, regs->r16);
	pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
				regs->r17, regs->r18, regs->r19, regs->r20);
	pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
				regs->r21, regs->r22, regs->r23, regs->r24);
	pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
				regs->r25, regs->r26, regs->r27, regs->r28);
	pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
				regs->r29, regs->r30, regs->r31, regs->pc);
	pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
				regs->msr, regs->ear, regs->esr, regs->fsr);
}

void (*pm_idle)(void);
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

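/*
 * hlt_counter selects the idle strategy: non-zero means busy-poll with
 * cpu_relax(), zero means sleep with cpu_sleep() until the next interrupt.
 * It can be set at boot time with the "hlt"/"nohlt" parameters below.
 */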
static int hlt_counter = 1;

void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);

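/*
 * Architecture idle routine: either spin with cpu_relax() (when sleeping is
 * disabled via hlt_counter) or put the CPU to sleep until an interrupt.
 */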
void default_idle(void)
{
	if (likely(hlt_counter)) {
		local_irq_disable();
		stop_critical_timings();
		cpu_relax();
		start_critical_timings();
		local_irq_enable();
	} else {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		local_irq_disable();
		while (!need_resched())
			cpu_sleep();
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);
	}
}

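/*
 * Per-CPU idle loop: run the platform idle routine (pm_idle, falling back to
 * default_idle) with the nohz tick and RCU told that this CPU is idle, then
 * reschedule once there is work to do.
 */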
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

		if (!idle)
			idle = default_idle;

		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched())
			idle();
		rcu_idle_exit();
		tick_nohz_idle_exit();

		schedule_preempt_disabled();
		check_pgt_cache();
	}
}

void flush_thread(void)
{
}

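/*
 * Set up the child's registers and cpu_context for fork/clone.  Kernel
 * threads get a zeroed frame with the function and argument stashed in
 * r20/r19; user threads inherit a copy of the parent's register frame.
 */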
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct thread_info *ti = task_thread_info(p);

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* If we're creating a new kernel thread, just zero all the
		 * registers; that is fine for a brand new thread. */
		memset(childregs, 0, sizeof(struct pt_regs));
		memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
		ti->cpu_context.r1 = (unsigned long)childregs;
		ti->cpu_context.r20 = (unsigned long)usp; /* fn */
		ti->cpu_context.r19 = (unsigned long)arg;
		childregs->pt_mode = 1;
		local_save_flags(childregs->msr);
#ifdef CONFIG_MMU
		ti->cpu_context.msr = childregs->msr & ~MSR_IE;
#endif
		ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
		return 0;
	}
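	/* User thread: start from a copy of the parent's register frame. */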
	*childregs = *current_pt_regs();
	if (usp)
		childregs->r1 = usp;

	memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
	ti->cpu_context.r1 = (unsigned long)childregs;
#ifndef CONFIG_MMU
	ti->cpu_context.msr = (unsigned long)childregs->msr;
#else
	childregs->msr |= MSR_UMS;

	/* Note that childregs is a copy of the parent's registers, which were
	 * saved immediately after entering kernel state and before enabling
	 * VM.  This MSR is restored in switch_to and RETURN(), and we want the
	 * machine state there to be correct: interrupts must be disabled
	 * before the rtbd and enabled after it.  So compose the right MSR for
	 * RETURN() here; it also works for switch_to, except for the VM and
	 * UMS bits.  Don't touch the UMS, CARRY and cache bits - right now
	 * the MSR is still a copy of the parent's. */
	childregs->msr &= ~MSR_EIP;
	childregs->msr |= MSR_IE;
	childregs->msr &= ~MSR_VM;
	childregs->msr |= MSR_VMS;
	childregs->msr |= MSR_EE; /* exceptions will be enabled */

	ti->cpu_context.msr = (childregs->msr|MSR_VM);
	ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
	ti->cpu_context.msr &= ~MSR_IE;
#endif
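	/* The child will resume in ret_from_fork after its first context
	 * switch. */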
	ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;

	/*
	 * r21 is the thread reg, r10 is 6th arg to clone
	 * which contains TLS area
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->r21 = childregs->r10;

	return 0;
}

#ifndef CONFIG_MMU
/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct cpu_context *ctx =
		&(((struct thread_info *)(tsk->stack))->cpu_context);

	/* Check whether the thread is blocked in resume() */
	if (in_sched_functions(ctx->r15))
		return (unsigned long)ctx->r15;
	else
		return ctx->r14;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	/* TBD (used by procfs) */
	return 0;
}

/* Set up a thread for executing a new program */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
	regs->pc = pc;
	regs->r1 = usp;
	regs->pt_mode = 0;
#ifdef CONFIG_MMU
	regs->msr |= MSR_UMS;
	regs->msr &= ~MSR_VM;
#endif
}

#ifdef CONFIG_MMU
#include <linux/elfcore.h>
/*
 * Dump the FPU register state for an ELF core dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	return 0; /* MicroBlaze has no separate FPU registers */
}
#endif /* CONFIG_MMU */