blob: b19bfef2034dbb67c737b7388ea5d92bcdcae0d4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
Sam Ravnborg0013a852005-09-09 20:57:26 +020031#include <asm/asm-offsets.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <asm/unistd.h>
33
34#undef SHOW_SYSCALLS
35#undef SHOW_SYSCALLS_TASK
36
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE,
 * so it no longer fits in a single 16-bit immediate and needs the
 * two-instruction lis/ori sequence to load.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
45
#ifdef CONFIG_BOOKE
#include "head_booke.h"
/*
 * Finish the transfer for an exception taken at a raised exception
 * level (machine check / debug / critical).  Presumably
 * BOOKE_LOAD_EXC_LEVEL_STACK leaves the level's stack in r8 — TODO
 * confirm against head_booke.h; the r10/r11 values saved just below
 * that stack are copied into the pt_regs frame at r11.
 */
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif
72
#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	/* On 40x, r10/r11 were stashed at fixed low-memory locations
	 * (crit_r10/crit_r11); copy them into the exception frame. */
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif
82
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 * Register conventions here (from the code below): r11 = exception
 * frame, r9 = saved MSR, r12 = saved NIP, r10 = MSR value to run the
 * handler with; LR points at a pair of words giving the handler's
 * virtual address and the return address.
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)		/* full frame: save r13-r31 too */
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR		/* cr0.eq = exception from kernel */
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3		/* SPRG3 holds phys addr of THREAD */
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from kernel, skip user fixups */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)	/* from user: fix up THREAD.regs */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)			/* MMU still off: use phys addr */
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)		/* decrement the saved-count word */
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f	/* interrupted a nap: unwind it */
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING	/* clear the NAPPING flag */
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif
161
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = regs argument */
	lis	r1,init_thread_union@ha		/* switch to the init stack */
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI				/* enter StackOverflow with MMU on */
185
/*
 * Handle a system call.
 * r0 holds the syscall number (scaled by 4 and used to index
 * sys_call_table below); the arguments appear to be in r3-r8 — the
 * trace path restores exactly GPR3..GPR8 before retrying.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)	/* keep original arg0 for restarts */
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A	/* being traced/audited? */
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2		/* table of 4-byte pointers */
	bge-	66f		/* out-of-range number -> ENOSYS */
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	/*
	 * Common syscall return path: disable interrupts, check the
	 * work flags, convert a negative-errno return (r3) into a
	 * positive errno with CR0.SO set, and RFI back to the caller.
	 */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3		/* keep raw return value for tracing */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8		/* error if r3 in [-_LAST_ERRNO, -1] */
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f		/* flag set: flush icache first */
1:
#endif /* CONFIG_44x */
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0		/* invalidate icache, clear the flag */
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS	/* bad syscall number */
	b	ret_from_syscall
277
	/*
	 * First code run by a newly-forked child: finish the scheduler
	 * handoff, then return 0 to userspace via the syscall path.
	 */
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0		/* child's fork() return value */
	b	ret_from_syscall
284
/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00	/* mark frame as a full syscall frame */
	stw	r0,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)	/* (the tracer may have changed them) */
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont
301
/*
 * Slow path of syscall exit: entered from ret_from_syscall with
 * r9 = TI_FLAGS, r12 = thread_info, r3 = return value, r6 = copy
 * of r3, r8 = -_LAST_ERRNO, r10 = MSR_KERNEL (interrupts off).
 */
syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL	/* restore full register set? */
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8		/* errno-range return value? */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR	/* suppress errno translation? */
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12	/* atomic read-modify-write of flags */
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b		/* lost reservation: retry */
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1		/* LSB set = partial frame */
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full
David Woodhouse1c3eb622005-11-26 14:44:47 +0000354
/*
 * Debug-only helpers (compiled under SHOW_SYSCALLS) that printk each
 * syscall's number/arguments on entry and its result on exit.  With
 * SHOW_SYSCALLS_TASK they only fire for the task whose 'current' (r2)
 * matches show_syscalls_task.  r31 is used to preserve LR around the
 * printk calls and is saved/restored in the frame.
 */
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr			/* not the watched task: skip */
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)	/* syscall number */
	lwz	r5,GPR3(r1)	/* args */
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)	/* restore clobbered volatiles */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */
422
/*
 * The fork/clone functions need to copy the full register set into
 * the child process.  Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext
459
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)	/* r4 = faulting address on entry */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except	/* handled: normal exception return */
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1		/* clear LSB: full register set saved */
	stw	r0,TRAP(r1)
	mr	r5,r3		/* error code from do_page_fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full
482
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0	/* turn them off in the saved MSR */
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
571
	/*
	 * Fast restore path for exceptions that only saved the volatile
	 * registers.  Per the restores below: r11 = exception frame,
	 * r9 = MSR to return with (-> SRR1), r12 = return NIP (-> SRR0).
	 */
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)		/* r11 last: it held the frame */
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)	/* count the restarts */
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)	/* TRAP = -1: unknown exception */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception	/* handler address pair */
	.long	ret_from_except			/* read via LR above */
#endif
634
	/*
	 * Common exception/interrupt return path.  _full restores the
	 * nonvolatile registers first; both then fall into the work-flag
	 * checks with interrupts hard-disabled.
	 */
	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18	/* re-check: flags may have changed */
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
692
	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0		/* invalidate icache, clear the flag */
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */
781
/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we     \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

/*
 * Common return sequence for exception-level interrupts (critical /
 * debug / machine check).  The caller supplies the level's save/restore
 * SRR registers and its own return-from-interrupt instruction.
 */
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */
858
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 * global_dbcr0 below holds two words: the saved DBCR0 value
 * and a counter updated on each save/restore.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)	/* save the global DBCR0 */
	mtspr	SPRN_DBCR0,r0	/* install the task's value */
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
887
/*
 * Handle pending work before returning to user mode: reschedule
 * and/or deliver signals.  Entered with the TI_FLAGS value in r9
 * and MSR_KERNEL (interrupts disabled) in r10.
 */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18	/* re-read flags with interrupts off */
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1		/* LSB set = partial frame */
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30	/* clear LSB: full set now saved */
	stw	r3,TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck
923
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 * r12 holds the interrupted NIP (checked against the restartable
 * exc_exit window below).
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)	/* count the restarts */
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1		/* LSB set = partial frame */
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous