/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
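/*
 * For example, LOAD_MSR_KERNEL(r10,MSR_KERNEL) becomes
 *	lis r10,(MSR_KERNEL)@h; ori r10,r10,(MSR_KERNEL)@l
 * on 4xx/Book-E, since li can only materialize a 16-bit signed
 * immediate; elsewhere it is a single li r10,(MSR_KERNEL).
 */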

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
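/*
 * (The exception prologs typically establish that condition with an
 * andi. against MSR_PR in the saved SRR1 image, so cr0.eq is set
 * exactly when the interrupted context was the kernel.)
 */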
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/* Save handler and return address into the 2 unused words
	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
	 * else can be recovered from the pt_regs except r3 which for
	 * normal interrupts has been set to pt_regs and for syscalls
	 * is an argument, so we temporarily use ORIG_GPR3 to save it
	 */
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,ORIG_GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r0,GPR0(r1)
	lwz	r3,ORIG_GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	lwz	r9,8(r1)
	lwz	r11,12(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

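/*
 * PPC32 Linux syscall ABI (for reference): the syscall number arrives
 * in r0 and up to six arguments in r3-r8; the result is returned in
 * r3, with CR0.SO set when r3 holds an error code.
 */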
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we
	 * want to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000			/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
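/*
 * On entry here r4 holds the faulting address and r5 the error bits
 * (DSISR or ESR, depending on the core), which do_page_fault() takes
 * as its second and third arguments.
 */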
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
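/*
 * (Called from __switch_to(); the value handed back in r3 is the
 * previously-current task, which the C code returns as "last".)
 */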
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs, but that sounds like overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	bl	trace_hardirqs_on
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
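/*
 * RET_FROM_EXC_LEVEL below is therefore parameterized on the SRR pair
 * and the rfi flavour (rfci/rfdi/rfmci) appropriate to each exception
 * level.
 */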
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR, we	\
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

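/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) pastes tokens to produce
 * loads from _CSRR0(r1)/_CSRR1(r1) followed by mtspr to
 * SPRN_CSRR0/SPRN_CSRR1, i.e. it puts the saved level-specific SRR
 * pair back before the final rfi of that level.
 */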
#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
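/*
 * global_dbcr0 is laid out as one 8-byte slot per CPU: word 0 holds
 * the saved DBCR0 and word 1 a nesting count (decremented again in
 * transfer_to_handler), which is why the SMP case below indexes the
 * array with cpu << 3.
 */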
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * neither. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
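/*
 * On entry r3 already holds the (physical) RTAS argument buffer; it is
 * passed through untouched, while SRR0/SRR1 are set up to enter RTAS
 * with translation disabled.
 */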
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register. But we have r0 to play with. We use r0
	 * to push the return address back to the caller of mcount
	 * into the ctr register, restore the link register and
	 * then jump back using the ctr register.
	 */
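	/*
	 * Concretely: on entry LR holds the address just after the
	 * "bl _mcount" inside the instrumented function, while 4(r1)
	 * holds that function's own return address, saved by its
	 * prologue; the sequence below returns via ctr to the former
	 * with LR restored to the latter.
	 */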
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */