/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
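/*
 * For example, LOAD_MSR_KERNEL(r10,MSR_KERNEL) expands to a single
 *	li	r10,MSR_KERNEL
 * when the value fits in a 16-bit immediate, and to
 *	lis	r10,MSR_KERNEL@h; ori r10,r10,MSR_KERNEL@l
 * on 4xx/Book-E, where MSR_CE pushes MSR_KERNEL to 0x10000 or above.
 */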

#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG
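/*
 * The macro above runs on an exception-level (critical/debug/machine
 * check) stack: it parks r8 in that level's SPRG, loads r8 with the
 * exception-level stack via BOOKE_LOAD_EXC_LEVEL_STACK, copies the
 * saved GPR10/GPR11 slots from that frame into the normal exception
 * frame at r11, and then restores r8, so that the common
 * transfer_to_handler code below sees a complete frame.
 */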

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

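/*
 * System call entry: the exception prologue has already built a
 * pt_regs frame on the kernel stack; r0 carries the syscall number
 * (used below to index sys_call_table) and r3-r8 carry the arguments,
 * which is why the trace path reloads exactly GPR0 and GPR3-GPR8
 * before redoing the dispatch.
 */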
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
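	/* A return value in the range [-_LAST_ERRNO, -1] is an error:
	 * negate it into a positive errno and set the SO bit in the
	 * saved CR0 so the user-space syscall stub sees the failure. */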
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
#endif /* CONFIG_44x */
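	/* Clear any outstanding lwarx reservation so that a stwcx. in
	 * the code we return to cannot succeed on a stale reservation;
	 * CPUs flagged CPU_FTR_NEED_PAIRED_STWCX want that stwcx. to be
	 * paired with a lwarx, hence the feature section below. */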
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
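	/* lwarx/stwcx. retry loop: clear the per-syscall bits in
	 * thread_info->flags atomically so concurrent updates to other
	 * bits are not lost (r12 temporarily points at the TI_FLAGS
	 * word). */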
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
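	/* Bad page fault: save the nonvolatile GPRs and clear the low
	 * bit of the TRAP word to record that the frame now holds the
	 * full register set, then report the fault. */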
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
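	/* If the old task was using FP (or AltiVec), clear those MSR
	 * bits both in the hardware MSR and in the MSR value saved in
	 * the frame; the idea, presumably, is that the task traps on
	 * its next FP/AltiVec use and has its state handled by the
	 * lazy reload path. */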
	and.	r0,r0,r11	/* FP or altivec enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

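/*
 * Fast return path from an exception: expects r11 to point at the
 * exception frame and r9/r12 to hold the MSR and NIP to return with;
 * only r1, r3-r6, r9-r12, CR and LR are restored before the RFI.
 * On non-4xx/Book-E CPUs, MSR_RI is checked first and we fall into
 * the restart/nonrecoverable handling below if the interrupt was not
 * recoverable.
 */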
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

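/*
 * Layout of global_dbcr0 below: the first word holds the saved global
 * DBCR0 value, the second is a count that load_dbcr0 increments and
 * the exception entry path (transfer_to_handler) decrements when it
 * puts the saved value back into DBCR0.
 */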
	.section .bss
	.align	4
global_dbcr0:
	.space	8
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

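/*
 * Work loop on the way back to user mode: entered with interrupts
 * hard-disabled, r9 holding thread_info->flags and r10 holding
 * MSR_KERNEL.  Interrupts are re-enabled around the calls to
 * schedule() and do_signal(), then "recheck" disables them again and
 * loops until no work bits remain, finally branching back to
 * restore_user.
 */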
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous