/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

/*
 * System calls.
 */
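/*
 * Register conventions on entry, as implied by the saves below: the
 * syscall number arrives in r0 and its arguments in r3-r8; the
 * exception prolog has left the user r13 in r9, SRR0/SRR1 in r11/r12,
 * and r13 pointing at the PACA.  Success or failure is reported back
 * through CR0.SO (cleared on entry via crclr, set again in
 * syscall_error), with the result or positive errno in r3.
 *
 * Illustrative userspace invocation (a sketch, not part of this file):
 *
 *	li	r0,__NR_getpid	# syscall number in r0
 *	sc			# traps to system_call_common
 *	bns+	1f		# CR0.SO clear: success, result in r3
 *	# CR0.SO set: r3 holds the positive errno
 * 1:
 */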
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
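	/* The marker just stored 16 bytes below STACK_FRAME_OVERHEAD is
	 * what lets stack unwinders recognise this as an exception frame.
	 * Syscalls from userspace arrive with interrupts enabled, so
	 * record the soft- and hard-enabled state in the PACA to match.
	 */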
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
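/*
 * Each syscall has two consecutive 8-byte table entries, the 64-bit
 * handler followed by the 32-bit one, hence the scale-by-16 (slwi 4)
 * below and the +8 offset used to select the 32-bit variant.
 */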
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
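	/* The rotate sequence below clears MSR_EE without needing a
	   64-bit mask constant: rotating left by 48 brings EE to the
	   top bit, rldicl's mask drops it, and the second rotate
	   restores the original bit positions. */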
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
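	/* The stdcx. above kills any reservation we may hold, so that a
	   stcx. executed later (by this task, or the one we switch to)
	   cannot succeed against a stale reservation. */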
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

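	/* The larx/stcx. loop below updates TI_FLAGS atomically, so that
	   flag bits set concurrently elsewhere are not lost. */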
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
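/*
 * The low bit of the _TRAP word is set while only the volatile
 * registers are on the stack; once the non-volatiles have been saved
 * it is cleared (cf. FULL_REGS() in <asm/ptrace.h>).
 */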
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
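	/* The store order below (invalidate the ESID, write the new
	 * VSID, then the new ESID) ensures the shadow entry is never
	 * seen valid while it holds a stale VSID.
	 */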
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

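	/* PACAKSAVE holds the kernel stack pointer for the incoming task;
	 * system_call_common and the exception entry code reload r1 from
	 * it when coming in from userspace.
	 */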
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
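	/* i.e. fold MSR_PR into the mask so that _TIF_SIGPENDING is only
	   checked when returning to user mode */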
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10		/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	stb	r5,PACASOFTIRQEN(r13)

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63	/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

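	/* Two MSR values are assembled below.  r0 is the current MSR with
	 * EE/SE/BE/RI cleared; it is installed first so that we cannot be
	 * interrupted once SRR0/SRR1 are live.  r6 additionally clears
	 * SF (64-bit mode), IR/DR (translation) and the FP bits, gains
	 * RI, and is what RTAS actually runs with, via SRR1 and rfid.
	 */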
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12		/* clear MSR_SF (64-bit mode) */
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12		/* clear MSR_ISF (64-bit interrupts) */
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr
849 blr