/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
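/*
 * Note: li takes only a 16-bit sign-extended immediate, so MSR_KERNEL
 * values of 0x10000 and up have to be built with a lis/ori pair
 * rather than a single li.
 */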

#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG
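/*
 * Roughly, the macro above parks r8 in the exception-level SPRG,
 * locates that level's exception stack, copies the r10/r11 values
 * the exception prolog saved there into the main exception frame
 * at r11, and then recovers r8.
 */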

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
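/*
 * Note on the mflr/lwz pair above: each caller of transfer_to_handler
 * (the exception prologs in head*.S) is expected to follow its bl with
 * two words - the handler's virtual address and the address to return
 * to afterwards - which are picked up here through LR.
 */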

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

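/*
 * On entry (via the 0xc00 system-call exception): r0 holds the
 * syscall number and r3-r8 hold the arguments; an exception frame
 * has already been set up on the kernel stack, with r1 pointing at it.
 */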
_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
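/*
 * The C handlers report failure by returning -errno in r3.  Userland
 * instead expects a positive errno with the summary-overflow (SO) bit
 * set in CR0, so the exit path below converts one convention into the
 * other (unless _TIFL_FORCE_NOERROR asks for the value to be passed
 * through untouched).
 */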
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	li	r11,-_LAST_ERRNO
	cmplw	0,r3,r11
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	blt+	30f
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne	30f
	neg	r3,r3
	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)

	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1
	beq	4f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
4:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	REST_NVGPRS(r1)
2:
	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	bl	schedule
	b	2b

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30
	stw	r0,TRAP(r1)
	b	sys_rt_sigsuspend

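/*
 * The fork family and swapcontext below need the full register set
 * saved as well, since the C code builds the child's (or new
 * context's) state from this frame; clearing the low bit of TRAP
 * records that the frame now holds a full set.
 */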
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

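/*
 * Assumed entry conditions here (set up by the exception prologs in
 * head*.S): r11 points at the saved register frame, r9 holds the MSR
 * value to restore and r12 the NIP to return to.
 */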
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	bnel-	do_syscall_trace_leave
	/* fall through */

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR, we	\
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

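/*
 * Each exception level has its own save/restore SPR pair and its own
 * return-from-interrupt flavour (RFCI, RFDI, RFMCI below), so the
 * macro above is instantiated once per level with the matching pair.
 */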
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr
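/*
 * Layout of global_dbcr0, as used here and in transfer_to_handler:
 * word 0 holds the saved global DBCR0 value, word 1 a count that is
 * bumped for each load_dbcr0 and dropped again on exception entry
 * when the global value is put back.
 */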

	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
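/*
 * Note: SPRG2 is loaded with the physical stack pointer for the
 * duration of the RTAS call and cleared again afterwards; a non-zero
 * SPRG2 appears to be what the machine check handler uses to detect
 * that we were inside RTAS - hence machine_check_in_rtas below.
 */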
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	CLR_TOP32(r7)
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */