/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
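
/*
 * For illustration (constants assumed here, check <asm/reg.h> for the
 * real values): with a classic 32-bit MSR_KERNEL of, say, 0x1032, the
 * macro assembles to the single instruction "li r10,0x1032"; on
 * 4xx/Book-E, where MSR_CE pushes MSR_KERNEL past the 16-bit immediate
 * range, it becomes "lis r10,MSR_KERNEL@h; ori r10,r10,MSR_KERNEL@l".
 */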

#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
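
/*
 * A note on the dispatch above: transfer_to_handler is reached with a
 * "bl" from the exception prologue, so LR points at the two words the
 * prologue (the EXC_XFER_* macros in the head_*.S files) placed right
 * after the branch: the handler's virtual address, then the address
 * to continue at once the handler is done.
 */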

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
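	/* For reference: rlwinm with SH=0, MB=4, ME=2 uses a wrap-around
	 * mask (MB > ME) that keeps bits 0-2 and 4-31 and clears only
	 * bit 3 of the saved CR image, i.e. CR0[SO] -- the bit set again
	 * below on error to signal syscall failure to the C library. */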
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
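	/* The rlwinm above is just "r1 & ~(THREAD_SIZE-1)": e.g. with 8KB
	 * kernel stacks THREAD_SHIFT is 13, so clearing the low 13 bits
	 * of the stack pointer gives the thread_info at the base of the
	 * stack.  The same idiom recurs throughout this file. */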
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
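	/* In C terms, roughly "handler = sys_call_table[nr]": the syscall
	 * number was scaled by 4 (the slwi above) because the table holds
	 * 32-bit function pointers, and out-of-range numbers have already
	 * been diverted to the -ENOSYS return at 66f. */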
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
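	/* This is the 32-bit PowerPC syscall error convention: values in
	 * [-_LAST_ERRNO, -1], caught by the unsigned compare against r8,
	 * are negated to a positive errno and reported with CR0[SO] set;
	 * any other value is a normal result with SO clear. */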
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
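	/* The lwarx/andc/stwcx. loop above is the standard PowerPC atomic
	 * read-modify-write, here doing ti->flags &= ~_TIF_PERSYSCALL_MASK
	 * without losing concurrent flag updates; it retries if the
	 * reservation is lost.  The dcbt is only a 405 errata workaround. */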

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
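/*
 * The low bit of the saved trap number tracks this: the exception
 * prologues store an odd _TRAP value while only the volatile registers
 * are in the frame, and clearing the LSB (the rlwinm ...,0,0,30 below)
 * marks the frame as holding the full register set.
 */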
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
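/*
 * Seen from C, this behaves like (a sketch of the convention, not a
 * declaration copied from a header):
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *new);
 * returning, in the new task's context, the task that was running
 * before the switch (the "last" value, passed out via r3 below).
 */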
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif
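	/* Register conventions assumed on entry here: r11 points at the
	 * exception frame, r9 holds the MSR to return with and r12 the
	 * return NIP; r9-r12 are only restored from the frame after they
	 * have been fed into SRR1/SRR0 below. */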

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1		/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI	/* check if this exception occurred */
	beql	nonrecoverable	/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.		/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR, we	\
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.comm	global_dbcr0,8
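
/*
 * Layout of global_dbcr0, as inferred from its uses here and in
 * transfer_to_handler: word 0 holds the kernel's saved DBCR0 value,
 * word 1 a count of outstanding load_dbcr0 calls, incremented above
 * and decremented when the saved value is reloaded on kernel entry
 * from a single-stepped task.
 */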
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
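/*
 * Calling convention, as used in this tree: r3 carries the physical
 * address of the RTAS argument buffer straight through to RTAS; the
 * code below saves the caller's MSR, points SRR0/SRR1 at the RTAS
 * entry with MSR_IR/MSR_DR clear, and leaves the physical stack
 * pointer in SPRG2 (a non-zero SPRG2 appears to be how the low-level
 * exception code recognises that RTAS is active).
 */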
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */