/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
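	/* The "regshere" marker stored just below the pt_regs area
	 * lets the stack unwinder (and debuggers such as xmon)
	 * recognise frames that carry a full saved register set, so
	 * backtraces can print the registers at exception boundaries.
	 */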
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * We need to vector to the 32-bit or the default (64-bit) sys_call_table
 * here, based on the caller's run mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */
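
/* Roughly, the dispatch above behaves like the C sketch below.  This is
 * an illustration only (the names are ad hoc, and the real entries are
 * the handlers' dot-symbol entry addresses): each syscall number has two
 * adjacent 8-byte table entries, the native 64-bit one first and the
 * 32-bit/compat one second, which is why the number is scaled by 16
 * (slwi r0,r0,4) and offset by 8 for 32-bit tasks.
 *
 *	typedef long (*syscall_fn)(long, long, long, long, long, long);
 *	extern syscall_fn sys_call_table[][2]; // [nr][0]=64-bit, [nr][1]=32-bit
 *
 *	long dispatch(unsigned long nr, int is_32bit, unsigned long a[6])
 *	{
 *		if (is_32bit)			// the clrldi rN,rN,32 above
 *			for (int i = 0; i < 6; i++)
 *				a[i] = (unsigned int)a[i];
 *		return sys_call_table[nr][is_32bit](a[0], a[1], a[2],
 *						    a[3], a[4], a[5]);
 *	}
 */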

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* Disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
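	/* The rldicl/rotldi pair is a mask-free way of clearing MSR_EE:
	 * rotate the MSR left 48 bits so EE (0x8000) becomes the
	 * most-significant bit, clear that bit, then rotate a further
	 * 16 bits to complete a full 64-bit rotation.  In C terms:
	 *
	 *	m = (msr << 48) | (msr >> 16);	// rotl64(msr, 48)
	 *	m &= ~(1UL << 63);		// clear EE, now the MSB
	 *	m = (m << 16) | (m >> 48);	// rotl64(m, 16)
	 *	// m == msr & ~MSR_EE
	 */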
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
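	/* The ldarx/stdcx. loop above is PowerPC's load-reserve/
	 * store-conditional idiom; a sketch of the equivalent C
	 * (ti stands for the thread_info pointer, and a generic
	 * compare-and-swap builtin stands in for the reservation):
	 *
	 *	unsigned long old, new;
	 *	do {
	 *		old = ti->flags;
	 *		new = old & ~_TIF_PERSYSCALL_MASK;
	 *	} while (!__sync_bool_compare_and_swap(&ti->flags, old, new));
	 */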

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
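/* Convention: bit 0 of the trap number saved in the frame is set while
 * the non-volatile GPRs are *not* valid on the stack (the syscall entry
 * stores 0xc01, for instance); clearing it below records that they have
 * now been saved.
 */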
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
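
/*
 * Seen from C, _switch is used roughly as follows (a sketch only; the
 * actual declaration lives in the switch_to() glue):
 *
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 *
 * It saves prev's non-volatile state on prev's kernel stack, installs
 * next's stack pointer, SLB entry and PACA 'current', and returns on
 * next's stack at the point where next last called _switch, handing
 * back the old task as the return value.
 */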
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
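	/* The order above is deliberate: the ESID word is zeroed first
	 * so the shadow entry is never valid while it still holds the
	 * old VSID, since the hypervisor may consume this buffer at
	 * any time to reconstruct our bolted SLB entries.
	 */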

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif
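
	/* In outline, a C sketch of the two paths above (ti and regs
	 * are illustrative names):
	 *
	 *	mask = _TIF_NEED_RESCHED;		// PREEMPT path
	 *	if (regs->msr & MSR_PR)			// returning to user?
	 *		mask |= _TIF_SIGPENDING;	// the rlwimi above
	 *	if (ti->flags & mask)
	 *		goto do_work;
	 *	// !PREEMPT path: only returns to user mode check flags
	 *	// at all, using the wider _TIF_USER_WORK_MASK.
	 *	goto restore;
	 */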

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* Note: we just clobbered r10 which used to contain the previous
	 * MSR before the hard-disabling done by the caller of do_work.
	 * We don't have that value anymore, but it doesn't matter as
	 * we will hard-enable unconditionally, we can just reload the
	 * current MSR into r10
	 */
	mfmsr	r10
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1	/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
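/*
 * C callers reach enter_rtas through the RTAS glue in
 * arch/powerpc/kernel/rtas.c, roughly as follows (a sketch of the
 * interface, not a definitive prototype):
 *
 *	int rtas_call(int token, int nargs, int nret, int *outputs, ...);
 *	// marshals the arguments into the global rtas_args block and
 *	// then calls enter_rtas(__pa(&rtas_args)) with translation off.
 */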
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */
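
	/* In effect, a sketch of the MSR arithmetic above:
	 *
	 *	r0 = msr & ~(MSR_EE|MSR_SE|MSR_BE|MSR_RI);  // interim MSR
	 *	r6 = (r0 & ~(MSR_SF|MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP))
	 *	     | MSR_RI;
	 *	// r6: the MSR RTAS runs with -- 32-bit, translation off,
	 *	// FP off, but recoverable-interrupt set.
	 */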

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FTRACE
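/*
 * With -pg, the compiler emits a call to _mcount at every function
 * entry.  Conceptually, each stub below reduces to the C sketch here
 * (illustrative names; see the ftrace core for the real types):
 *
 *	void _mcount(unsigned long self_ra, unsigned long parent_ra)
 *	{
 *		ftrace_trace_function(self_ra - MCOUNT_INSN_SIZE,
 *				      parent_ra);
 *	}
 *
 * where self_ra is this function's own return address (from the LR)
 * and parent_ra is its caller's, fetched from the stack frame.
 */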
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl

	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif
#endif