blob: 8377b6ca26da4039fabc0ba5dd46861edb4e9190 [file] [log] [blame]
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
21
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE,
 * so a single "li" (16-bit signed immediate) cannot encode it and
 * we need the two-instruction lis/ori form.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
46
#ifdef CONFIG_BOOKE
#include "head_booke.h"
/*
 * Book-E critical/machine-check exceptions save r10/r11 below the
 * exception-level stack frame; this macro copies them into the normal
 * exception frame (at r11) before falling into the common transfer
 * code.  r8 is preserved via the exception level's dedicated SPRG.
 */
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif
68
#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	/* 40x critical exceptions stash r10/r11 at fixed low-memory
	 * locations (crit_r10/crit_r11, addressed absolute off r0);
	 * copy them into the exception frame at r11. */
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif
78
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 *
 * On entry (set up by the exception prologs): r11 = exception frame,
 * r9 = saved MSR, r12 = saved NIP, r10 = MSR value to run the handler
 * with; the two words following the caller's "bl" hold the handler
 * address and the return address (fetched via LR at label 3 below).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR		/* cr0.eq set iff from kernel */
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3		/* SPRG3 holds phys &current->thread */
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)			/* MMU still off here */
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)		/* global_dbcr0[1]: use count */
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11			/* HID0 bits into CR so bt can test them */
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
154
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	/* NOTE(review): r11 was just overwritten with &_end, so this
	 * SAVE_NVGPRS writes relative to &_end, not the exception frame;
	 * later arch/powerpc entry_32.S uses r12 as the scratch register
	 * for the &_end comparison — verify against upstream history. */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
178
/*
 * Handle a system call.
 * On entry: r0 = syscall number, r3-r8 = arguments, r1 = kernel stack
 * with a full exception frame already built, r2 = current.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)	/* keep arg0 for restart/tracing */
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace		/* ptrace/audit hooks wanted */
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2			/* index * sizeof(ptr) */
	bge-	66f			/* out-of-range syscall number */
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	li	r11,-_LAST_ERRNO
	cmplw	0,r3,r11	/* return value in the -errno range? */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	blt+	30f
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne	30f
	neg	r3,r3		/* return positive errno with SO set */
	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)

	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1		/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)	/* r1 back to user stack; frame gone */
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall
264
	/* First return of a newly-forked child: finish the scheduler's
	 * bookkeeping, then return 0 to userspace via the syscall path. */
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0			/* child's fork() return value */
	b	ret_from_syscall
271
/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00		/* mark frame as a syscall trap */
	stw	r0,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/* the tracer may have modified the registers; reload them all */
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont
288
/* Slow syscall-exit path: tracing, pending signal, or reschedule.
 * On entry: r3 = (possibly errno-converted) return value, r6 = raw
 * handler result, r9 = TI_FLAGS, r10 = MSR_KERNEL (EE off). */
syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1		/* LSB set => NVGPRS not yet saved */
	beq	4f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
4:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	REST_NVGPRS(r1)
2:
	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR	/* signals only matter for user return */
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	bl	schedule
	b	2b
329
#ifdef SHOW_SYSCALLS
/* Debug-only: printk each syscall entry (number + first six args).
 * r31 is borrowed as a scratch LR save; caller-visible state restored
 * before returning. */
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11		/* only trace the selected task */
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	/* printk clobbered the volatile regs; reload from the frame */
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

/* Debug-only: printk each syscall's return value. */
do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1			/* -1 = trace all tasks */
	.text
#endif
#endif /* SHOW_SYSCALLS */
397
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.  Clearing the LSB of TRAP records that the frame now
 * holds a full register set.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30
	stw	r0,TRAP(r1)
	b	sys_rt_sigsuspend

	/* fork/clone need the full register set so the child can be
	 * given an exact copy of the parent's context. */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext
452
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 * On entry: r4 = faulting address (DAR), exception frame at r1.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0			/* 0 = handled successfully */
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1			/* clear LSB: full regs saved */
	stw	r0,TRAP(r1)
	mr	r5,r3			/* pass do_page_fault's error code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full
475
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 * Returns (in r3) the 'last' task, i.e. the previous current.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0	/* turn those facilities off for the
				 * outgoing task; lazy state save */
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
564
/* Fast return from a "lightweight" exception: restore the few
 * registers the prolog saved (frame at r11, saved MSR in r9, saved
 * NIP in r12) and RFI without building/consuming a full pt_regs. */
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)		/* r11 restored last: it is the frame ptr */
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1			/* count restarts for statistics */
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)		/* TRAP = -1: unknown trap */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception	/* handler address */
	.long	ret_from_except			/* return address */
#endif
623
	/* Return path for sigreturn: r3 points at the pt_regs to
	 * restore; notify the tracer if syscall tracing is active,
	 * then fall into the normal exception-return path. */
	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	bnel-	do_syscall_trace_leave
	/* fall through */
632
	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
690
	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)	/* final load: r1 leaves the frame */
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI
741
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we     \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

/* Common epilogue for critical/machine-check return: restore the
 * full register set plus DEAR/ESR, then return via the exception
 * level's own SRR pair and rfi variant. */
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */
832
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */
842
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 * global_dbcr0[0] holds the saved DBCR0; global_dbcr0[1] is a
 * use count incremented here and decremented in transfer_to_handler.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
867
/* Work loop before returning to user mode: reschedule and/or deliver
 * signals until TI_FLAGS is clean.  r9 = TI_FLAGS on entry. */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1		/* LSB set => NVGPRS not yet saved */
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30	/* clear LSB: full register set saved */
	stw	r3,TRAP(r1)
2:	li	r3,0		/* no oldset pointer */
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck
903
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 * r12 = interrupted NIP.  If it lies inside the restartable
 * exception-exit window, redo the exit; otherwise kill the process.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1		/* count restarts for statistics */
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4
941
/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off (MSR IR/DR cleared via SRR1 + RFI).
 * The saved MSR and return address live in our stack frame; the
 * physical frame address is parked in SPRG2 across the call.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	mfmsr	r9
	stw	r9,8(r1)	/* stash caller's MSR for the return path */
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	CLR_TOP32(r7)
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* branch into RTAS, translation off */
1:	tophys(r9,r1)		/* back from RTAS; still translation off */
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0	/* clear the "in RTAS" marker */
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0		/* unconditional trap: die loudly */
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */