/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
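
/*
 * Illustrative example (not from the original source): li only takes a
 * 16-bit signed immediate, so a constant such as 0x21000 needs two
 * instructions; LOAD_MSR_KERNEL(r10, 0x21000) expands to
 *	lis	r10,0x2		# r10 = 0x00020000
 *	ori	r10,r10,0x1000	# r10 = 0x00021000
 * building the 32-bit value from its high and low halves.
 */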

#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
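
/*
 * A typical call site, as a sketch (the real exception prologs live in
 * the head_*.S files): transfer_to_handler is reached via
 *	bl	transfer_to_handler
 *	.long	SomeHandler	- read above through 0(lr)
 *	.long	ret_from_except	- read above through 4(lr)
 * which is why the code at "3:" fetches the handler address and the
 * post-handler return address from the two words following the bl.
 */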

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

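/*
 * Calling convention reminder (32-bit PowerPC Linux ABI): the syscall
 * number arrives in r0 and the arguments in r3-r8; the result goes
 * back in r3, with an error reported by setting the SO bit of CR0 and
 * returning the positive errno value (see the neg/oris sequence below).
 */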
_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
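/*
 * Roughly, in C (a sketch of the unsigned compare above): return
 * values in the range [-_LAST_ERRNO, -1] are treated as errors, i.e.
 *	if ((unsigned long)ret >= (unsigned long)(-_LAST_ERRNO))
 *		ret = -ret;	and SO is set in the saved CR
 * anything else, including other negative values, passes through
 * unchanged as a genuine result.
 */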
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont
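
/*
 * Note (editorial): the reloads above pick the syscall number and
 * arguments back up from the exception frame rather than trusting the
 * registers, both because the C call clobbered the volatile registers
 * and because the tracer may have rewritten them in the frame via
 * ptrace.
 */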

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
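
/*
 * The lwarx/stwcx. loop above is the classic PowerPC atomic
 * read-modify-write; as a sketch, it is equivalent to
 *	atomically: ti->flags &= ~_TIF_PERSYSCALL_MASK;
 * stwcx. fails (and we retry) if anything else touched the
 * reservation granule between the load and the store.
 */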

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext
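
/*
 * Convention used throughout this file: the low bit of the saved trap
 * number in _TRAP(r1) is 1 while r13-r31 have NOT been saved in the
 * frame; rlwinm rX,rX,0,0,30 clears that bit once SAVE_NVGPRS has run,
 * so later code (and the signal/ptrace paths) can tell whether the
 * frame holds the full register set.
 */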

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
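
/*
 * In C terms (a sketch, not the literal caller): the switch_to()
 * machinery ends up doing something like
 *	last = _switch(&prev->thread, &next->thread);
 * where the value returned in r3 is the task that was running before
 * the switch (saved from r2 below as "the old current").
 */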
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif
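
	/*
	 * Editorial note: r11 points at the exception frame throughout
	 * the sequence below, which is why GPR11 itself is reloaded
	 * last -- after that load the frame can no longer be addressed.
	 */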

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr
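
/*
 * Layout of global_dbcr0, as used above and in transfer_to_handler
 * (an editorial summary, not new behaviour):
 *	word 0: the saved global DBCR0 value
 *	word 1: a use count, incremented here and decremented when the
 *		saved value is reinstated on entry from a ptraced task
 */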

	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */
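
/*
 * Editorial note: SPRG2 is loaded with the physical stack pointer for
 * the duration of the RTAS call and cleared again afterwards; as far
 * as we can tell, a non-zero SPRG2 is what lets the machine check
 * prolog recognise "we were inside RTAS" and divert to
 * machine_check_in_rtas below.
 */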

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */