blob: d82878c4daa677c416cf116924b4d34e58d501d3 [file] [log] [blame]
Paul Mackerras9994a332005-10-10 22:36:14 +10001/*
Paul Mackerras9994a332005-10-10 22:36:14 +10002 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
22#include <asm/unistd.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/mmu.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100030#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000031#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100032#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100033#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053034#include <asm/ftrace.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100035
/*
 * System calls.
 */
	.section	".toc","aw"
/* TOC entry giving an addressable pointer to the system call table. */
.SYS_CALL_TABLE:
	.tc	.sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
Paul Mackerras9994a332005-10-10 22:36:14 +100046
	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

/*
 * Common 64-bit system call entry/exit path.
 *
 * On entry (register state set up by the syscall exception prologue,
 * which is not visible in this file):
 *   r0      = system call number (indexed into the table below)
 *   r3-r8   = system call arguments (saved to GPR3..GPR8 below)
 *   r9      = caller's r13 value (it is what gets stored to GPR13)
 *   r11/r12 = interrupted NIP/MSR (stored to _NIP/_MSR below)
 *   r13     = PACA pointer (PACAKSAVE/PACATOC are read from it)
 * NOTE(review): these roles are inferred from the stores in the first
 * few instructions -- confirm against the 0xc00 vector prologue.
 */
	.globl	system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR		/* CR0.EQ <- came from kernel mode */
	mr	r10,r1			/* remember entry stack pointer */
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f			/* from kernel: keep current stack */
	ld	r1,PACAKSAVE(r13)	/* from user: switch to kernel stack */
1:	std	r10,0(r1)		/* back-chain to the old frame */
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	/* r9-r12 are volatile and already consumed; zero their frame slots */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)		/* caller's r13 was staged in r9 */
	mfcr	r9
	mflr	r10
	li	r11,0xc01		/* trap word: 0xc00 vector, bit 0 set
					 * = non-volatile GPRs not yet saved
					 * (see save_nvgprs) */
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)	/* keep arg0 for restart/tracing */
	ld	r2,PACATOC(r13)		/* establish kernel TOC */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f			/* CR0 still from the MSR_PR test */
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	/* the C call clobbered the syscall registers; reload them */
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* again, reload everything the C call may have clobbered */
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	/* mark interrupts soft- and hard-enabled in the PACA and frame */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	/* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT	/* r11 = current_thread_info() */
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace		/* tracer wants to see this call */
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys		/* number out of range -> -ENOSYS */

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	/* truncate the arguments a 32-bit task passed in 64-bit regs */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4		/* each table entry is 16 bytes */
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT	/* r12 = current_thread_info() */

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore		/* saved state not recoverable */
#endif

	/* Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1	/* rotate EE into bit 63 and clear it... */
	rotldi	r10,r10,16	/* ...then rotate back into place */
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO	/* threshold for error returns */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error	/* r3 in [-_LAST_ERRNO, -1] -> error */
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR		/* returning to user mode? */
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3		/* return positive errno in r3 */
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)	/* flags may have changed under trace */
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r10 is -LAST_ERRNO */
	blt+	1f		/* not an error value */
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f		/* caller asked to keep r3 untranslated */
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */
	/* atomic ldarx/stdcx. loop: other bits may change concurrently */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except
/* Save non-volatile GPRs, if not already saved.
 *
 * Bit 0 of the saved trap word flags that r14-r31 are NOT yet in the
 * exception frame: return immediately if it is clear, otherwise store
 * the non-volatile GPRs and clear the flag.  Clobbers r0 and r11.
 */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-			/* bit 0 clear: already saved, return */
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear bit 0: nvgprs are now saved */
	std	r0,_TRAP(r1)
	blr
341
David Woodhouse401d1f02005-11-15 18:52:18 +0000342
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 *
 * Each wrapper below saves the non-volatile GPRs into the frame,
 * calls the C implementation, then joins the common syscall exit.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit
/*
 * First code run by a newly forked task: finish the scheduler's
 * bookkeeping, reload the non-volatile GPRs copied from the parent,
 * and return 0 (the child's fork() result) via the syscall exit path.
 */
_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0			/* child returns 0 from fork/clone */
	b	syscall_exit
382
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)		/* save LR in caller's frame */
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20			/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP		/* build mask of FP/vector MSR bits */
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22		/* any of those facilities enabled? */
	beq+	1f
	andc	r22,r22,r0		/* turn them off before switching */
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)		/* resume point for the old task */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f		/* no SLB on this CPU: skip the bolting */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* !CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack.  */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
536
/*
 * Common exception/interrupt return path.
 *
 * ret_from_except restores the non-volatile GPRs first if the trap
 * word says they were saved; ret_from_except_lite skips that.  Both
 * then disable interrupts, check for pending work (reschedule or
 * signals) and finally restore the full register state and rfid.
 */
	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite	/* bit 0 set: nvgprs not in frame */
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)		/* soft-enable state to restore */
FW_FTR_SECTION_ELSE
	b	.Liseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r0 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore		/* saved state not recoverable */

	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3		/* MSR to restore via rfid */

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2		/* NIP to restore via rfid */

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

.Liseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b			/* restoring soft-disabled: no check */
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
#endif

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore		/* can't preempt: just return */

	/* Here we are preempting the current task.
	 *
	 * Ensure interrupts are soft-disabled. We also properly mark
	 * the PACA to reflect the fact that they are hard-disabled
	 * and trace the change
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	TRACE_DISABLE_INTS

	/* Call the scheduler with soft IRQs off */
1:	bl	.preempt_schedule_irq

	/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1	/* clear MSR_EE */
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	li	r0,0
	stb	r0,PACAHARDIRQEN(r13)

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */

	/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore		/* loops forever; cannot recover */
751
752#ifdef CONFIG_PPC_RTAS
753/*
754 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
755 * called with the MMU off.
756 *
757 * In addition, we need to be in 32b mode, at least for now.
758 *
759 * Note: r3 is an input parameter to rtas, so don't trash it...
760 */
761_GLOBAL(enter_rtas)
762 mflr r0
763 std r0,16(r1)
764 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
765
766 /* Because RTAS is running in 32b mode, it clobbers the high order half
767 * of all registers that it saves. We therefore save those registers
768 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
769 */
770 SAVE_GPR(2, r1) /* Save the TOC */
771 SAVE_GPR(13, r1) /* Save paca */
772 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
773 SAVE_10GPRS(22, r1) /* ditto */
774
775 mfcr r4
776 std r4,_CCR(r1)
777 mfctr r5
778 std r5,_CTR(r1)
779 mfspr r6,SPRN_XER
780 std r6,_XER(r1)
781 mfdar r7
782 std r7,_DAR(r1)
783 mfdsisr r8
784 std r8,_DSISR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000785
Mike Kravetz9fe901d2006-03-27 15:20:00 -0800786 /* Temporary workaround to clear CR until RTAS can be modified to
787 * ignore all bits.
788 */
789 li r0,0
790 mtcr r0
791
David Woodhouse007d88d2007-01-01 18:45:34 +0000792#ifdef CONFIG_BUG
Paul Mackerras9994a332005-10-10 22:36:14 +1000793 /* There is no way it is acceptable to get here with interrupts enabled,
794 * check it with the asm equivalent of WARN_ON
795 */
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000796 lbz r0,PACASOFTIRQEN(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +10007971: tdnei r0,0
David Woodhouse007d88d2007-01-01 18:45:34 +0000798 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
799#endif
800
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000801 /* Hard-disable interrupts */
802 mfmsr r6
803 rldicl r7,r6,48,1
804 rotldi r7,r7,16
805 mtmsrd r7,1
806
Paul Mackerras9994a332005-10-10 22:36:14 +1000807 /* Unfortunately, the stack pointer and the MSR are also clobbered,
808 * so they are saved in the PACA which allows us to restore
809 * our original state after RTAS returns.
810 */
811 std r1,PACAR1(r13)
812 std r6,PACASAVEDMSR(r13)
813
814 /* Setup our real return addr */
David Gibsone58c3492006-01-13 14:56:25 +1100815 LOAD_REG_ADDR(r4,.rtas_return_loc)
816 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000817 mtlr r4
818
819 li r0,0
820 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
821 andc r0,r6,r0
822
823 li r9,1
824 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
Anton Blanchard44c9f3c2010-02-07 19:37:29 +0000825 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
Paul Mackerras9994a332005-10-10 22:36:14 +1000826 andc r6,r0,r9
Paul Mackerras9994a332005-10-10 22:36:14 +1000827 sync /* disable interrupts so SRR0/1 */
828 mtmsrd r0 /* don't get trashed */
829
David Gibsone58c3492006-01-13 14:56:25 +1100830 LOAD_REG_ADDR(r4, rtas)
Paul Mackerras9994a332005-10-10 22:36:14 +1000831 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
832 ld r4,RTASBASE(r4) /* get the rtas->base value */
833
834 mtspr SPRN_SRR0,r5
835 mtspr SPRN_SRR1,r6
836 rfid
837 b . /* prevent speculative execution */
838
/*
 * rtas_return_loc: landing point after firmware RTAS returns.
 *
 * Entered in real mode (relocation off); the RTAS entry path above
 * set LR to the real-mode address of this label before branching to
 * firmware.  Recovers the PACA pointer from SPRG, locates the
 * relocated address of rtas_restore_regs PC-relatively, restores the
 * kernel stack pointer and pre-RTAS MSR saved in the PACA, and uses
 * rfid to resume at rtas_restore_regs with translation back on.
 */
839_STATIC(rtas_return_loc)
840	/* relocation is off at this point */
Benjamin Herrenschmidtee43eb72009-07-14 20:52:54 +0000841	mfspr	r4,SPRN_SPRG_PACA	/* Get PACA */
David Gibsone58c3492006-01-13 14:56:25 +1100842	clrldi	r4,r4,2			/* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000843
	/* bcl to the next instruction puts the current address in LR;
	 * the assemble-time offset (1f - 0b) then reaches the .llong
	 * below, which holds the virtual address of rtas_restore_regs. */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000844	bcl	20,31,$+4
8450:	mflr	r3
846	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */
847
	/* Clear MSR_RI: SRR0/SRR1 are about to be overwritten below, so
	 * an exception taken here would not be recoverable. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000848	mfmsr	r6
849	li	r0,MSR_RI
850	andc	r6,r6,r0
851	sync
852	mtmsrd	r6
853
854	ld	r1,PACAR1(r4)           /* Restore our SP */
Paul Mackerras9994a332005-10-10 22:36:14 +1000855	ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */
856
	/* Return "from interrupt": SRR0 = rtas_restore_regs,
	 * SRR1 = the MSR saved before entering RTAS. */
857	mtspr	SPRN_SRR0,r3
858	mtspr	SPRN_SRR1,r4
859	rfid
860	b	.	/* prevent speculative execution */
861
	/* 8-byte-aligned literal: virtual address of rtas_restore_regs,
	 * loaded PC-relatively above. */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000862	.align	3
8631:	.llong	.rtas_restore_regs
864
/*
 * rtas_restore_regs: final leg of the RTAS return path.
 *
 * Runs with relocation on again.  Restores the TOC (r2), the
 * non-volatile GPRs, CR, CTR, XER, DAR and DSISR from the RTAS stack
 * frame, reloads r13 (the PACA pointer) from SPRG, pops the frame and
 * returns to the original caller via the saved LR at 16(r1).
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000865_STATIC(rtas_restore_regs)
866	/* relocation is on at this point */
867	REST_GPR(2, r1)			/* Restore the TOC */
868	REST_GPR(13, r1)		/* Restore paca */
869	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
870	REST_10GPRS(22, r1)		/* ditto */
871
	/* Reload r13 from SPRG rather than trusting the value restored
	 * from the stack above. */
Benjamin Herrenschmidtee43eb72009-07-14 20:52:54 +0000872	mfspr	r13,SPRN_SPRG_PACA
Paul Mackerras9994a332005-10-10 22:36:14 +1000873
	/* Restore the remaining SPRs/condition state saved in the frame. */
874	ld	r4,_CCR(r1)
875	mtcr	r4
876	ld	r5,_CTR(r1)
877	mtctr	r5
878	ld	r6,_XER(r1)
879	mtspr	SPRN_XER,r6
880	ld	r7,_DAR(r1)
881	mtdar	r7
882	ld	r8,_DSISR(r1)
883	mtdsisr	r8
Paul Mackerras9994a332005-10-10 22:36:14 +1000884
885	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
886	ld	r0,16(r1)		/* get return address */
887
888	mtlr    r0
889	blr				/* return to caller */
890
891#endif /* CONFIG_PPC_RTAS */
892
/*
 * enter_prom: call into Open Firmware (the PROM).
 *
 * r4 holds the PROM entrypoint (moved to LR below before the call);
 * NOTE(review): r3 presumably carries the OF client-interface argument
 * buffer -- confirm against the callers, it is passed through untouched.
 * Saves every register 32-bit OF may clobber, switches the CPU to
 * 32-bit mode for the call, then restores 64-bit mode and all saved
 * state before returning.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000893_GLOBAL(enter_prom)
894	mflr	r0
895	std	r0,16(r1)
896        stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */
897
898	/* Because PROM is running in 32b mode, it clobbers the high order half
899	 * of all registers that it saves.  We therefore save those registers
900	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
901	 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000902	SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000903	SAVE_GPR(13, r1)
904	SAVE_8GPRS(14, r1)
905	SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000906	mfcr	r10
Paul Mackerras9994a332005-10-10 22:36:14 +1000907	mfmsr	r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000908	std	r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000909	std	r11,_MSR(r1)
910
911	/* Get the PROM entrypoint */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000912	mtlr	r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000913
914	/* Switch MSR to 32 bits mode
915	 */
	/* Book3E: clear the top bit of the (32-bit view of the) MSR via
	 * rlwinm keeping bits 1..31, then mtmsr. */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000916#ifdef CONFIG_PPC_BOOK3E
917	rlwinm	r11,r11,0,1,31
918	mtmsr	r11
919#else /* CONFIG_PPC_BOOK3E */
	/* Classic 64-bit: clear MSR_SF (64-bit mode) and MSR_ISF
	 * (64-bit interrupt mode) bits individually. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000920        mfmsr	r11
921        li	r12,1
922        rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
923        andc	r11,r11,r12
924        li	r12,1
925        rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
926        andc	r11,r11,r12
927        mtmsrd	r11
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000928#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000929        isync
930
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000931	/* Enter PROM here... */
Paul Mackerras9994a332005-10-10 22:36:14 +1000932	blrl
933
934	/* Just make sure that r1 top 32 bits didn't get
935	 * corrupt by OF
936	 */
937	rldicl	r1,r1,0,32
938
939	/* Restore the MSR (back to 64 bits) */
940	ld	r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000941	MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +1000942	isync
943
944	/* Restore other registers */
945	REST_GPR(2, r1)
946	REST_GPR(13, r1)
947	REST_8GPRS(14, r1)
948	REST_10GPRS(22, r1)
949	ld	r4,_CCR(r1)
950	mtcr	r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000951	
	/* Tear down our frame and return to the caller via 16(r1). */
952	addi	r1,r1,PROM_FRAME_SIZE
953	ld	r0,16(r1)
954	mtlr    r0
955	blr
Steven Rostedt4e491d12008-05-14 23:49:44 -0400956
/*
 * Function-entry tracing hooks (_mcount / ftrace_caller).
 *
 * With CONFIG_DYNAMIC_FTRACE the default _mcount is a bare blr;
 * NOTE(review): the compiler-emitted call sites are presumably
 * patched at run time to branch to ftrace_caller instead -- the
 * .globl ftrace_call / ftrace_graph_call labels below exist so that
 * code can locate and patch those call slots.
 *
 * Frame convention in both variants: a 112-byte frame is pushed and
 * the hook's own LR (an address inside the traced function) is stored
 * at 128(r1), i.e. in the caller's frame above the new one.
 */
Steven Rostedt606576c2008-10-06 19:06:12 -0400957#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -0400958#ifdef CONFIG_DYNAMIC_FTRACE
959_GLOBAL(mcount)
960_GLOBAL(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -0400961	blr
962
963_GLOBAL(ftrace_caller)
964	/* Taken from output of objdump from lib64/glibc */
	/* r3 = LR = address just past the call in the traced function;
	 * r11 = back chain -> the traced function's caller's frame;
	 * r4 = 16(r11) = parent return address (LR save slot, ppc64 ABI). */
965	mflr	r3
966	ld	r11, 0(r1)
967	stdu	r1, -112(r1)
968	std	r3, 128(r1)
969	ld	r4, 16(r11)
	/* Back r3 up to the address of the mcount call site itself. */
Abhishek Sagar395a59d2008-06-21 23:47:27 +0530970	subi	r3, r3, MCOUNT_INSN_SIZE
	/* Patch slot: rewritten to call the active tracer; the default
	 * target ftrace_stub just returns. */
Steven Rostedt4e491d12008-05-14 23:49:44 -0400971.globl ftrace_call
972ftrace_call:
973	bl	ftrace_stub
974	nop
	/* Second patch slot for the function-graph tracer entry hook. */
Steven Rostedt46542882009-02-10 22:19:54 -0800975#ifdef CONFIG_FUNCTION_GRAPH_TRACER
976.globl ftrace_graph_call
977ftrace_graph_call:
978	b	ftrace_graph_stub
979_GLOBAL(ftrace_graph_stub)
980#endif
	/* Restore LR from 128(r1), pop the 112-byte frame and return. */
Steven Rostedt4e491d12008-05-14 23:49:44 -0400981	ld	r0, 128(r1)
982	mtlr	r0
983	addi	r1, r1, 112
984_GLOBAL(ftrace_stub)
985	blr
986#else
987_GLOBAL(mcount)
988	blr
989
	/* Static (non-dynamic) variant: every traced function calls here;
	 * the tracer is dispatched indirectly via ftrace_trace_function. */
990_GLOBAL(_mcount)
991	/* Taken from output of objdump from lib64/glibc */
992	mflr	r3
993	ld	r11, 0(r1)
994	stdu	r1, -112(r1)
995	std	r3, 128(r1)
996	ld	r4, 16(r11)
997
Abhishek Sagar395a59d2008-06-21 23:47:27 +0530998	subi	r3, r3, MCOUNT_INSN_SIZE
	/* Double dereference: ftrace_trace_function is a pointer to a
	 * function descriptor; load the pointer, then its entry word,
	 * and call through CTR. */
Steven Rostedt4e491d12008-05-14 23:49:44 -0400999	LOAD_REG_ADDR(r5,ftrace_trace_function)
1000	ld	r5,0(r5)
1001	ld	r5,0(r5)
1002	mtctr	r5
1003	bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -04001004	nop
Steven Rostedt6794c782009-02-09 21:10:27 -08001005
1006
1007#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1008	b	ftrace_graph_caller
1009#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001010	ld	r0, 128(r1)
1011	mtlr	r0
1012	addi	r1, r1, 112
1013_GLOBAL(ftrace_stub)
1014	blr
1015
Steven Rostedt6794c782009-02-09 21:10:27 -08001016#endif /* CONFIG_DYNAMIC_FTRACE */
1017
/*
 * Function-graph tracer support.
 *
 * ftrace_graph_caller runs inside the _mcount/ftrace_caller frame
 * (traced-function address at 128(r1), back chain at 112(r1)) and
 * calls prepare_ftrace_return so the tracer can hijack the parent's
 * return address.  return_to_handler / mod_return_to_handler are the
 * trampolines the hijacked return lands on: they preserve the
 * function's return values across the call to
 * ftrace_return_to_handler, which hands back the real return address.
 */
1018#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Steven Rostedt46542882009-02-10 22:19:54 -08001019_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -08001020	/* load r4 with local address */
1021	ld	r4, 128(r1)
1022	subi	r4, r4, MCOUNT_INSN_SIZE
1023
	/* get the parent address: r11 = previous frame; the parent's
	 * return address lives in its LR save slot at 16(r11), and r3
	 * is passed as a pointer to that slot. */
1024	/* get the parent address */
1025	ld	r11, 112(r1)
1026	addi	r3, r11, 16
1027
1028	bl	.prepare_ftrace_return
1029	nop
1030
	/* Restore LR, pop the 112-byte hook frame, return. */
1031	ld	r0, 128(r1)
1032	mtlr	r0
1033	addi	r1, r1, 112
1034	blr
1035
	/* Return trampoline for core-kernel functions.
	 * NOTE(review): unlike mod_return_to_handler below, r2 (TOC) is
	 * not saved/switched here -- this path assumes it is already
	 * running on the kernel TOC. */
1036_GLOBAL(return_to_handler)
1037	/* need to save return values */
	/* Return values r3/r4 (and r31, used to snapshot r1) are stashed
	 * at negative offsets below the current stack pointer, then a
	 * fresh 112-byte frame is pushed for the C call. */
Steven Rostedtbb725342009-02-11 12:45:49 -08001038	std	r4,  -24(r1)
1039	std	r3,  -16(r1)
1040	std	r31, -8(r1)
1041	mr	r31, r1
1042	stdu	r1, -112(r1)
1043
1044	bl	.ftrace_return_to_handler
1045	nop
1046
1047	/* return value has real return address */
1048	mtlr	r3
1049
	/* Unwind via the back chain and recover the stashed values. */
1050	ld	r1, 0(r1)
1051	ld	r4,  -24(r1)
1052	ld	r3,  -16(r1)
1053	ld	r31, -8(r1)
1054
1055	/* Jump back to real return address */
1056	blr
1057
	/* Return trampoline for module code: same as return_to_handler
	 * but additionally saves the module's TOC (r2) and switches to
	 * the kernel TOC for the duration of the C call. */
1058_GLOBAL(mod_return_to_handler)
1059	/* need to save return values */
Steven Rostedt6794c782009-02-09 21:10:27 -08001060	std	r4,  -32(r1)
1061	std	r3,  -24(r1)
1062	/* save TOC */
1063	std	r2,  -16(r1)
1064	std	r31, -8(r1)
1065	mr	r31, r1
1066	stdu	r1, -112(r1)
1067
Steven Rostedtbb725342009-02-11 12:45:49 -08001068	/*
1069	 * We are in a module using the module's TOC.
1070	 * Switch to our TOC to run inside the core kernel.
1071	 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001072	ld	r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001073
1074	bl	.ftrace_return_to_handler
1075	nop
1076
1077	/* return value has real return address */
1078	mtlr	r3
1079
	/* Unwind and restore return values plus the module TOC. */
1080	ld	r1, 0(r1)
1081	ld	r4,  -32(r1)
1082	ld	r3,  -24(r1)
1083	ld	r2,  -16(r1)
1084	ld	r31, -8(r1)
1085
1086	/* Jump back to real return address */
1087	blr
1088#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1089#endif /* CONFIG_FUNCTION_TRACER */