/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
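
/*
 * Illustrative expansion (editor's note, not generated code): when
 * MSR_KERNEL is above 0x10000, LOAD_MSR_KERNEL(r10, MSR_KERNEL) becomes
 *	lis r10,(MSR_KERNEL)@h; ori r10,r10,(MSR_KERNEL)@l
 * and otherwise a single li r10,(MSR_KERNEL).
 */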

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
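	/* On FSL Book-E, save the MMU-assist (MAS) registers: the
	 * critical interrupt may have arrived in the middle of a TLB
	 * update, and the handler could clobber them (editor's note on
	 * intent, inferred from the matching RESTORE_MMU_REGS on the
	 * exit path below). */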
#ifdef CONFIG_FSL_BOOKE
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	mfspr	r8,SPRN_SPRG3
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
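	/* Rebase ksp_limit onto the stack page r1 now points at, so the
	 * overflow check in transfer_to_handler keeps working while we
	 * run on the critical stack: the rlwimi keeps the low bits of
	 * the old limit and takes the upper, page-selecting bits from
	 * r1 (editor's reading of the mask). */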
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
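	/* The 40x critical prologue stashed r10/r11 (and we stash
	 * SRR0/SRR1 below) at fixed low-memory scratch words; the (0)
	 * forms are absolute addresses using base register zero, which
	 * is why only the @l half of each symbol is needed (editor's
	 * note on the addressing mode). */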
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	mfspr	r8,SPRN_SPRG3
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
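	/* The exception prologues branch here with LR pointing at a
	 * two-word table placed right after the branch: word 0 is the
	 * handler's virtual address, word 1 the address to go to when
	 * the handler returns (the bl transfer_to_handler_full near the
	 * end of this file shows the same pattern). */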
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

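/*
 * Calling convention, as used below (editor's summary): the syscall
 * number arrives in r0 and the arguments in r3..r8; the result is
 * returned in r3, with the SO bit set in the saved CR to flag an error.
 */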
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000			/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
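	/* dcbt before stwcx. is the 405 erratum 77 workaround, done
	 * open-coded here (the PPC405_ERR77 macro used elsewhere in
	 * this file wraps the same fix). */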
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
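/*
 * (Editor's note: the low bit of the _TRAP word records that only the
 * volatile registers are in the frame; clearing it below marks the
 * frame as holding the full set, which the syscall-exit and signal
 * paths test before deciding whether to SAVE_NVGPRS again.)
 */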
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
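	/* Fast path used by prologues that saved only the volatile
	 * state: on entry r11 points at the exception frame, r9 holds
	 * the saved MSR and r12 the saved NIP (editor's summary of the
	 * restores below). */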
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */\
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

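/*
 * RET_FROM_EXC_LEVEL restores the state saved by one of the
 * higher-level exception prologues and returns using that level's own
 * SRR pair and RFI variant, e.g.
 * RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI) for a critical
 * interrupt (see the uses further down).
 */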
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)	\
	lwz	r9,_##exc_lvl_srr0(r1);			\
	lwz	r10,_##exc_lvl_srr1(r1);		\
	mtspr	SPRN_##exc_lvl_srr0,r9;			\
	mtspr	SPRN_##exc_lvl_srr1,r10;
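
/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) reloads the _CSRR0/_CSRR1
 * words saved in the exception frame back into SPRN_CSRR0/SPRN_CSRR1.
 */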

#if defined(CONFIG_FSL_BOOKE)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7						\
	lwz	r11,MAS7(r1);					\
	mtspr	SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS					\
	lwz	r9,MAS0(r1);					\
	lwz	r10,MAS1(r1);					\
	lwz	r11,MAS2(r1);					\
	mtspr	SPRN_MAS0,r9;					\
	lwz	r9,MAS3(r1);					\
	mtspr	SPRN_MAS1,r10;					\
	lwz	r10,MAS6(r1);					\
	mtspr	SPRN_MAS2,r11;					\
	mtspr	SPRN_MAS3,r9;					\
	mtspr	SPRN_MAS6,r10;					\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS					\
	lwz	r9,MMUCR(r1);					\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG3
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG3
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
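	/* 8 bytes per CPU: the saved global DBCR0 value in the first
	 * word and a use count in the second (kept in step with
	 * load_dbcr0 above, which indexes by cpu*8). */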
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
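	/* Editor's note: SPRG2 appears to be loaded with the physical
	 * stack pointer here and cleared again on the way out, so the
	 * low-level exception code can treat a non-zero SPRG2 as
	 * "inside RTAS" (an inference from the save/clear pairing
	 * below, not stated in this file). */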
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */