/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
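	/*
	 * On entry the syscall exception prologue has left the old MSR in
	 * r12, the SRR0 return address in r11 and the user's r13 in r9
	 * (see the stores into _MSR, _NIP and GPR13 below).  Save the old
	 * stack pointer in r10, then either carve an INT_FRAME out of the
	 * current stack (syscall from the kernel) or pick up this thread's
	 * kernel stack from PACAKSAVE (syscall from user mode).
	 */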
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
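	/* Clear CR0[SO]: assume success; syscall_error sets SO in the
	 * saved CR to report failure back to userspace. */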
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
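	/* Account the user-mode cycles up to this kernel entry (a no-op
	 * unless CONFIG_VIRT_CPU_ACCOUNTING is enabled). */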
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
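	/*
	 * Trap value 0xc01: exception vector 0xc00 (system call), with
	 * bit 0 set to record that the non-volatile GPRs are not saved
	 * in this frame; save_nvgprs below tests and clears that bit.
	 */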
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
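	/*
	 * Each sys_call_table slot is 16 bytes: the 64-bit handler at
	 * offset 0 and the 32-bit handler at offset 8 (selected by the
	 * addi above), hence the scaling of the syscall number by 16.
	 */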
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
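	/*
	 * Hard-disable interrupts: rotating the MSR right by 16 puts
	 * MSR_EE in the top bit, where the rldicl mask clears it, and
	 * the rotldi rotates the result back into place.  This avoids
	 * needing a scratch register for a mask.
	 */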
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

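	/*
	 * TI_FLAGS can be updated by other code at the same time (e.g.
	 * TIF_SIGPENDING being set from another CPU), so clear the
	 * per-syscall bits with an ldarx/stdcx. loop rather than a
	 * plain read-modify-write.
	 */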
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
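	/*
	 * Restore the soft-enable state we saved from SOFTE(r1) into r5
	 * above; with CONFIG_TRACE_IRQFLAGS this also calls the irq
	 * trace hooks (see the TRACE_AND_RESTORE_IRQ definition in
	 * asm/irqflags.h).
	 */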
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* Note: we just clobbered r10 which used to contain the previous
	 * MSR before the hard-disabling done by the caller of do_work.
	 * We don't have that value anymore, but it doesn't matter as
	 * we will hard-enable unconditionally, we can just reload the
	 * current MSR into r10
	 */
	mfmsr	r10
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore


#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

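	/*
	 * Build two MSR images: r0 is the current MSR with EE/SE/BE/RI
	 * cleared, used below to hard-disable before the rfid; r6 is
	 * that value further stripped of SF, IR/DR and the FP bits
	 * (i.e. 32-bit real mode) with RI set, and becomes the MSR that
	 * RTAS runs with.
	 */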
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32-bit mode */
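	/* The li/rldicr pairs below build the single-bit masks
	 * 1 << MSR_SF_LG and 1 << MSR_ISF_LG; clearing MSR_SF (and its
	 * interrupt-entry counterpart MSR_ISF) drops the CPU into
	 * 32-bit mode. */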
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
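	/* LR holds the address just after the mcount call site, so
	 * r3 = LR - MCOUNT_INSN_SIZE is the call site itself, and
	 * r4 picks up the parent's return address from the LR save
	 * slot (offset 16) of the frame reached via the back chain
	 * in r11. */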
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
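	/*
	 * ftrace_trace_function holds a function pointer, which on
	 * ppc64 is a pointer to a function descriptor: the first ld
	 * fetches the descriptor, the second its entry point, which
	 * is then called through CTR.
	 */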
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl

	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif
#endif