blob: 64693706ebfd4833e8ab0ef601c7a638d40d85c3 [file] [log] [blame]
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
22#include <asm/unistd.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/mmu.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100030#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000031#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100032#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100033#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053034#include <asm/ftrace.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100035
/*
 * System calls.
 */
	.section	".toc","aw"
/* TOC entry giving the address of the 64-bit system call table. */
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS
51
	.globl system_call_common
/*
 * Common system call entry.
 *
 * On entry (as set up by the 0xc00 exception vector):
 *	r0  = syscall number (compared against NR_syscalls below)
 *	r3-r8 = syscall arguments (saved to GPR3..GPR8 in the frame)
 *	r9  = caller's r13 (saved to GPR13 in the frame)
 *	r11 = return address (saved to _NIP)
 *	r12 = interrupted MSR (saved to _MSR)
 *	r13 = PACA pointer
 *	r1  = stack pointer at time of syscall
 */
system_call_common:
	andi.	r10,r12,MSR_PR		/* were we in user mode? */
	mr	r10,r1			/* keep old r1 to store in the frame */
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f			/* from kernel: stay on current stack */
	ld	r1,PACAKSAVE(r13)	/* from user: switch to kernel stack */
1:	std	r10,0(r1)		/* back-chain to previous frame */
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero the GPR9-GPR12 slots rather than leak kernel values. */
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)		/* r9 holds the caller's r13 here */
	mfcr	r9
	mflr	r10
	li	r11,0xc01		/* trap value: syscall vector, low bit
					 * set = nonvolatile regs not saved */
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)	/* keep arg0 for syscall restart */
	ld	r2,PACATOC(r13)		/* load the kernel TOC pointer */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	/* the call above clobbered the saved args; reload them */
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* reload args/regs clobbered by the tracing call */
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	/* mark interrupts soft- and hard-enabled in the PACA and frame */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	/* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT	/* r11 = current_thread_info() */
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace		/* syscall tracing/audit active */
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	/* truncate args to 32 bits for a 32-bit task */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4		/* 16 bytes per table entry (64+32 bit) */
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT	/* r12 = current_thread_info() */

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif

	/* Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1	/* rotate MSR_EE into bit 63 and clear it */
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11		/* error if r3 >= -_LAST_ERRNO (unsigned) */
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR		/* returning to user mode? */
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */
251
David Woodhouse401d1f02005-11-15 18:52:18 +0000252syscall_error:
Paul Mackerras9994a332005-10-10 22:36:14 +1000253 oris r5,r5,0x1000 /* Set SO bit in CR */
David Woodhouse401d1f02005-11-15 18:52:18 +0000254 neg r3,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000255 std r5,_CCR(r1)
256 b syscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000257
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	/* reload TI_FLAGS into r10, as syscall_dotrace_cont expects */
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont
279
/* Syscall number out of range: fail with ENOSYS. */
syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit
283
/*
 * Slow path out of a syscall: handle TIF_RESTOREALL/TIF_NOERROR,
 * clear per-syscall TIF flags, and run syscall-exit tracing.
 * On entry: r3 = syscall result, r9 = TI_FLAGS, r11 = -_LAST_ERRNO,
 * r12 = thread_info pointer (all set up by syscall_exit above).
 */
syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	/* error return: set CR0.SO and make r3 the positive errno */
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
	/* atomic clear via ldarx/stdcx. loop on the flags word */
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000331
/* Save non-volatile GPRs, if not already saved.
 * Bit 0 of the saved _TRAP value is set while r14-r31 are NOT yet in
 * the frame; once we save them we clear that bit so a second call
 * (and ret_from_except) can tell they are already there.
 */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-			/* bit already clear: nothing to do */
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear bit 0 of the trap value */
	std	r0,_TRAP(r1)
	blr
341
David Woodhouse401d1f02005-11-15 18:52:18 +0000342
Paul Mackerras9994a332005-10-10 22:36:14 +1000343/*
344 * The sigsuspend and rt_sigsuspend system calls can call do_signal
345 * and thus put the process into the stopped state where we might
346 * want to examine its user state with ptrace. Therefore we need
347 * to save all the nonvolatile registers (r14 - r31) before calling
348 * the C code. Similarly, fork, vfork and clone need the full
349 * register state on the stack so that it can be copied to the child.
350 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000351
/* fork wrapper: the child needs the full register state saved. */
_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit
356
/* vfork wrapper: the child needs the full register state saved. */
_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit
361
/* clone wrapper: the child needs the full register state saved. */
_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit
366
/* 32-bit swapcontext: needs all nonvolatile regs in the frame. */
_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit
371
/* 64-bit swapcontext: needs all nonvolatile regs in the frame. */
_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit
376
/* First return to user space of a newly forked child: finish the
 * scheduler bookkeeping and exit via the syscall path with r3 = 0
 * (the child's fork() return value).
 */
_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit
382
383/*
384 * This routine switches between two different tasks. The process
385 * state of one is saved on its kernel stack. Then the state
386 * of the other is restored from its kernel stack. The memory
387 * management hardware is updated to the second process's state.
388 * Finally, we can return to the second process, via ret_from_except.
389 * On entry, r3 points to the THREAD for the current task, r4
390 * points to the THREAD for the new task.
391 *
392 * Note: there are two ways to get to the "going out" portion
393 * of this code; either by coming in via the entry (_switch)
394 * or via "fork" which must set up an environment equivalent
395 * to the "_switch" path. If you change this you'll have to change
396 * the fork code also.
397 *
398 * The code which creates the new task context is in 'copy_thread'
Jon Mason2ef94812006-01-23 10:58:20 -0600399 * in arch/powerpc/kernel/process.c
Paul Mackerras9994a332005-10-10 22:36:14 +1000400 */
	.align	7
/*
 * Context switch.  r3 = old task's THREAD, r4 = new task's THREAD
 * (see the block comment above).  Returns the old task_struct in r3.
 * Saves nonvolatile state in a SWITCH_FRAME on the old kernel stack,
 * switches stacks (updating the bolted stack SLB entry on Book3S),
 * and restores the new task's nonvolatile state.
 */
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP	/* build mask of facility bits to turn off */
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR	/* save old task's DSCR; r25 is compared
				 * against the new task's value below */
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
	and.	r0,r0,r22	/* any of FP/VMX/VSX currently enabled? */
	beq+	1f
	andc	r22,r22,r0	/* if so, turn them off in the MSR */
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f		/* no SLB: nothing to bolt */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* !CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	ld	r0,THREAD_DSCR(r4)
	cmpd	r0,r25		/* only write DSCR if it changes */
	beq	1f
	mtspr	SPRN_DSCR,r0
1:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
551
	.align	7
/* Full exception return: restore r14-r31 first if they were saved
 * (bit 0 of _TRAP clear means save_nvgprs stored them), then fall
 * through to the lightweight return path.
 */
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite	/* nvgprs were never saved */
	REST_NVGPRS(r1)
558
_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif
595
/*
 * Restore the saved register state and return from the exception.
 * On iSeries firmware we first detour to check for pending irqs.
 */
restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)
FW_FTR_SECTION_ELSE
	b	.Liseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63	/* r4 = (r3 >> 15) & 1, i.e. saved MSR_EE */
	stb	r4,PACAHARDIRQEN(r13)

#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3	/* r3 still holds the saved MSR */

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)	/* restore SP last */

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */
671
/* iSeries only: before returning, run do_IRQ for any interrupts the
 * hypervisor has queued in the lppaca while we were soft-disabled.
 * Branches back to label 2 in "restore" when nothing is pending.
 */
.Liseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b		/* soft-disabled: return without checking */
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	/* NOTE(review): without CONFIG_TRACE_IRQFLAGS, r10 here relies on
	 * the mfmsr done in ret_from_except_lite — verify on any change */
	ori	r10,r10,MSR_EE
	mtmsrd	r10		/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
#endif
695
Paul Mackerras9994a332005-10-10 22:36:14 +1000696do_work:
697#ifdef CONFIG_PREEMPT
698 andi. r0,r3,MSR_PR /* Returning to user mode? */
699 bne user_work
700 /* Check that preempt_count() == 0 and interrupts are enabled */
701 lwz r8,TI_PREEMPT(r9)
702 cmpwi cr1,r8,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000703 ld r0,SOFTE(r1)
704 cmpdi r0,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000705 crandc eq,cr1*4+eq,eq
706 bne restore
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000707
708 /* Here we are preempting the current task.
709 *
710 * Ensure interrupts are soft-disabled. We also properly mark
711 * the PACA to reflect the fact that they are hard-disabled
712 * and trace the change
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +1000713 */
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000714 li r0,0
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000715 stb r0,PACASOFTIRQEN(r13)
716 stb r0,PACAHARDIRQEN(r13)
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000717 TRACE_DISABLE_INTS
718
719 /* Call the scheduler with soft IRQs off */
7201: bl .preempt_schedule_irq
721
722 /* Hard-disable interrupts again (and update PACA) */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000723#ifdef CONFIG_PPC_BOOK3E
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000724 wrteei 0
725#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000726 mfmsr r10
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000727 rldicl r10,r10,48,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000728 rotldi r10,r10,16
729 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000730#endif /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000731 li r0,0
732 stb r0,PACAHARDIRQEN(r13)
733
734 /* Re-test flags and eventually loop */
735 clrrdi r9,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000736 ld r4,TI_FLAGS(r9)
737 andi. r0,r4,_TIF_NEED_RESCHED
738 bne 1b
739 b restore
740
741user_work:
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000742#endif /* CONFIG_PREEMPT */
743
Paul Mackerras9994a332005-10-10 22:36:14 +1000744 /* Enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000745#ifdef CONFIG_PPC_BOOK3E
746 wrteei 1
747#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000748 ori r10,r10,MSR_EE
749 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000750#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000751
752 andi. r0,r4,_TIF_NEED_RESCHED
753 beq 1f
754 bl .schedule
755 b .ret_from_except_lite
756
7571: bl .save_nvgprs
Roland McGrath7d6d6372008-07-27 16:52:52 +1000758 addi r3,r1,STACK_FRAME_OVERHEAD
Paul Mackerras9994a332005-10-10 22:36:14 +1000759 bl .do_signal
760 b .ret_from_except
761
/* MSR_RI was clear on an exception return: the state is unrecoverable.
 * Report it forever; unrecoverable_exception() is not expected to fix it.
 */
unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore
766
767#ifdef CONFIG_PPC_RTAS
768/*
769 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
770 * called with the MMU off.
771 *
772 * In addition, we need to be in 32b mode, at least for now.
773 *
774 * Note: r3 is an input parameter to rtas, so don't trash it...
775 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	/* Preserve the SPRs RTAS may clobber. */
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1	/* clear MSR_EE */
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	/* r0 = current MSR minus EE/SE/BE/RI: state while switching */
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	/* r6 = MSR for RTAS itself: additionally clear SF/IR/DR/FE/FP/RI
	 * so RTAS runs in 32-bit real mode */
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid				/* enter RTAS */
	b	.	/* prevent speculative execution */
853
/*
 * rtas_return_loc: RTAS branches back here when the call completes.
 * We are still in real mode (MMU off, relocation off), so only
 * PC-relative and real-mode-safe accesses may be used until the
 * final rfid restores the MSR saved in PACASAVEDMSR by enter_rtas.
 */
854_STATIC(rtas_return_loc)
855 /* relocation is off at this point */
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100856 GET_PACA(r4)
David Gibsone58c3492006-01-13 14:56:25 +1100857 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000858
 /* Position-independent fetch of the virtual address of
  * rtas_restore_regs: bcl puts the address of label 0 into LR,
  * then we load the .llong literal at label 1 relative to it.
  */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000859 bcl 20,31,$+4
8600: mflr r3
861 ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */
862
 /* Clear MSR_RI before loading SRR0/SRR1: an interrupt taken while
  * they hold our return state would be unrecoverable.
  */
Paul Mackerras9994a332005-10-10 22:36:14 +1000863 mfmsr r6
864 li r0,MSR_RI
865 andc r6,r6,r0
866 sync
867 mtmsrd r6
868
869 ld r1,PACAR1(r4) /* Restore our SP */
Paul Mackerras9994a332005-10-10 22:36:14 +1000870 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
871
 /* rfid to rtas_restore_regs with the pre-RTAS MSR: this turns
  * relocation back on in one step.
  */
872 mtspr SPRN_SRR0,r3
873 mtspr SPRN_SRR1,r4
874 rfid
875 b . /* prevent speculative execution */
876
 /* Literal pool: virtual (relocated) address of rtas_restore_regs,
  * read PC-relative above while still in real mode.
  */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000877 .align 3
8781: .llong .rtas_restore_regs
879
Paul Mackerras9994a332005-10-10 22:36:14 +1000880_STATIC(rtas_restore_regs)
881 /* relocation is on at this point */
882 REST_GPR(2, r1) /* Restore the TOC */
883 REST_GPR(13, r1) /* Restore paca */
884 REST_8GPRS(14, r1) /* Restore the non-volatiles */
885 REST_10GPRS(22, r1) /* ditto */
886
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100887 GET_PACA(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +1000888
889 ld r4,_CCR(r1)
890 mtcr r4
891 ld r5,_CTR(r1)
892 mtctr r5
893 ld r6,_XER(r1)
894 mtspr SPRN_XER,r6
895 ld r7,_DAR(r1)
896 mtdar r7
897 ld r8,_DSISR(r1)
898 mtdsisr r8
Paul Mackerras9994a332005-10-10 22:36:14 +1000899
900 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
901 ld r0,16(r1) /* get return address */
902
903 mtlr r0
904 blr /* return to caller */
905
906#endif /* CONFIG_PPC_RTAS */
907
/*
 * enter_prom: call into Open Firmware's 32-bit client interface.
 * r4 = PROM entry point (moved to LR below); r3 is left untouched and
 * passed straight through to PROM -- NOTE(review): presumably the
 * client-interface argument buffer; confirm against the C caller.
 * Saves everything PROM may clobber (it runs 32-bit and trashes the
 * high halves of registers it touches), drops the MSR to 32-bit mode,
 * calls PROM, then restores 64-bit mode and the saved state.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000908_GLOBAL(enter_prom)
909 mflr r0
910 std r0,16(r1)
911 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
912
913 /* Because PROM is running in 32b mode, it clobbers the high order half
914 * of all registers that it saves. We therefore save those registers
915 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
916 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000917 SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000918 SAVE_GPR(13, r1)
919 SAVE_8GPRS(14, r1)
920 SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000921 mfcr r10
Paul Mackerras9994a332005-10-10 22:36:14 +1000922 mfmsr r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000923 std r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000924 std r11,_MSR(r1)
925
926 /* Get the PROM entrypoint */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000927 mtlr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000928
929 /* Switch MSR to 32 bits mode
930 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000931#ifdef CONFIG_PPC_BOOK3E
 /* Book3E has no mtmsrd; clear bit 0 of the low MSR word via
  * rlwinm -- NOTE(review): presumably MSR[CM] (computation mode),
  * dropping to 32-bit mode; confirm against the Book3E MSR layout.
  */
932 rlwinm r11,r11,0,1,31
933 mtmsr r11
934#else /* CONFIG_PPC_BOOK3E */
 /* Book3S: clear MSR_SF (64-bit mode) and MSR_ISF from the MSR */
Paul Mackerras9994a332005-10-10 22:36:14 +1000935 mfmsr r11
936 li r12,1
937 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
938 andc r11,r11,r12
939 li r12,1
940 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
941 andc r11,r11,r12
942 mtmsrd r11
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000943#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000944 isync
945
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000946 /* Enter PROM here... */
Paul Mackerras9994a332005-10-10 22:36:14 +1000947 blrl
948
949 /* Just make sure that r1 top 32 bits didn't get
950 * corrupt by OF
951 */
952 rldicl r1,r1,0,32
953
954 /* Restore the MSR (back to 64 bits) */
955 ld r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000956 MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +1000957 isync
958
959 /* Restore other registers */
960 REST_GPR(2, r1)
961 REST_GPR(13, r1)
962 REST_8GPRS(14, r1)
963 REST_10GPRS(22, r1)
964 ld r4,_CCR(r1)
965 mtcr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000966
 /* Pop our frame and return to the C caller */
967 addi r1,r1,PROM_FRAME_SIZE
968 ld r0,16(r1)
969 mtlr r0
970 blr
Steven Rostedt4e491d12008-05-14 23:49:44 -0400971
/*
 * Function-tracer (ftrace) entry points.  The labels ftrace_call and
 * ftrace_graph_call below are runtime patch sites: the ftrace core
 * rewrites the branch instruction at those exact addresses, so the
 * layout of this code must not change.
 */
Steven Rostedt606576c2008-10-06 19:06:12 -0400972#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -0400973#ifdef CONFIG_DYNAMIC_FTRACE
/* With dynamic ftrace, the compiler-inserted mcount calls are patched
 * to nops at boot; mcount/_mcount themselves are plain returns. */
974_GLOBAL(mcount)
975_GLOBAL(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -0400976 blr

/* ftrace_caller: patched in at traced call sites when tracing is on.
 * On entry LR = callsite return address (the instruction after the
 * mcount call).  r3 = callsite address, r4 = the caller's own return
 * address fetched from the previous frame's LR save slot (16(r11)).
 */
977
978_GLOBAL(ftrace_caller)
979 /* Taken from output of objdump from lib64/glibc */
980 mflr r3
981 ld r11, 0(r1)
982 stdu r1, -112(r1)
 /* 128(r1) = 112 + 16: the LR save slot of the frame we just pushed */
983 std r3, 128(r1)
984 ld r4, 16(r11)
 /* back r3 up to the address of the mcount call itself */
Abhishek Sagar395a59d2008-06-21 23:47:27 +0530985 subi r3, r3, MCOUNT_INSN_SIZE
 /* PATCH SITE: ftrace core replaces this bl with a call to the tracer */
Steven Rostedt4e491d12008-05-14 23:49:44 -0400986.globl ftrace_call
987ftrace_call:
988 bl ftrace_stub
989 nop
Steven Rostedt46542882009-02-10 22:19:54 -0800990#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /* PATCH SITE: branch redirected to ftrace_graph_caller when the
  * graph tracer is enabled */
991.globl ftrace_graph_call
992ftrace_graph_call:
993 b ftrace_graph_stub
994_GLOBAL(ftrace_graph_stub)
995#endif
 /* restore LR and pop the 112-byte frame */
Steven Rostedt4e491d12008-05-14 23:49:44 -0400996 ld r0, 128(r1)
997 mtlr r0
998 addi r1, r1, 112
999_GLOBAL(ftrace_stub)
1000 blr
1001#else
/* Non-dynamic ftrace: every mcount call goes through _mcount, which
 * dispatches to the current ftrace_trace_function pointer. */
1002_GLOBAL(mcount)
1003 blr

1004
1005_GLOBAL(_mcount)
1006 /* Taken from output of objdump from lib64/glibc */
1007 mflr r3
1008 ld r11, 0(r1)
1009 stdu r1, -112(r1)
1010 std r3, 128(r1)
1011 ld r4, 16(r11)
1012
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301013 subi r3, r3, MCOUNT_INSN_SIZE
 /* load ftrace_trace_function, then dereference the ppc64 ELFv1
  * function descriptor it points at to get the entry address */
Steven Rostedt4e491d12008-05-14 23:49:44 -04001014 LOAD_REG_ADDR(r5,ftrace_trace_function)
1015 ld r5,0(r5)
1016 ld r5,0(r5)
1017 mtctr r5
1018 bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -04001019 nop
Steven Rostedt6794c782009-02-09 21:10:27 -08001020
1021
1022#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /* tail-branch: ftrace_graph_caller performs the frame teardown */
1023 b ftrace_graph_caller
1024#endif
1025 ld r0, 128(r1)
Steven Rostedt4e491d12008-05-14 23:49:44 -04001026 mtlr r0
1027 addi r1, r1, 112
1028_GLOBAL(ftrace_stub)
1029 blr
1030
Steven Rostedt6794c782009-02-09 21:10:27 -08001031#endif /* CONFIG_DYNAMIC_FTRACE */
1032
1033#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* ftrace_graph_caller: entered from the patch site above with the
 * 112-byte mcount frame still in place.  r4 = traced function's own
 * address (callsite minus MCOUNT_INSN_SIZE), r3 = address of the
 * parent's LR save slot (r11+16) so prepare_ftrace_return can hook it.
 */
Steven Rostedt46542882009-02-10 22:19:54 -08001034_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -08001035 /* load r4 with local address */
1036 ld r4, 128(r1)
1037 subi r4, r4, MCOUNT_INSN_SIZE
1038
1039 /* get the parent address */
1040 ld r11, 112(r1)
1041 addi r3, r11, 16
1042
1043 bl .prepare_ftrace_return
1044 nop
1045
 /* tear down the mcount frame on behalf of _mcount/ftrace_caller */
1046 ld r0, 128(r1)
1047 mtlr r0
1048 addi r1, r1, 112
1049 blr

/* return_to_handler: the hooked return path of a traced function.
 * The live return values (r3/r4) are stashed at negative offsets
 * below the SP before a frame is built; ftrace_return_to_handler
 * returns the real return address in r3.
 */
1050
1051_GLOBAL(return_to_handler)
1052 /* need to save return values */
Steven Rostedtbb725342009-02-11 12:45:49 -08001053 std r4, -24(r1)
1054 std r3, -16(r1)
1055 std r31, -8(r1)
1056 mr r31, r1
1057 stdu r1, -112(r1)
1058
1059 bl .ftrace_return_to_handler
1060 nop
1061
1062 /* return value has real return address */
1063 mtlr r3
1064
 /* pop the frame via the back chain, then reload the stashed values */
1065 ld r1, 0(r1)
1066 ld r4, -24(r1)
1067 ld r3, -16(r1)
1068 ld r31, -8(r1)
1069
1070 /* Jump back to real return address */
1071 blr

/* mod_return_to_handler: as return_to_handler, but for returns out of
 * module code -- additionally saves the module's TOC (r2) and switches
 * to the kernel TOC from the paca before calling into the core.
 */
1072
1073_GLOBAL(mod_return_to_handler)
1074 /* need to save return values */
Steven Rostedt6794c782009-02-09 21:10:27 -08001075 std r4, -32(r1)
1076 std r3, -24(r1)
1077 /* save TOC */
1078 std r2, -16(r1)
1079 std r31, -8(r1)
1080 mr r31, r1
1081 stdu r1, -112(r1)
1082
Steven Rostedtbb725342009-02-11 12:45:49 -08001083 /*
1084 * We are in a module using the module's TOC.
1085 * Switch to our TOC to run inside the core kernel.
1086 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001087 ld r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001088
1089 bl .ftrace_return_to_handler
1090 nop
1091
1092 /* return value has real return address */
1093 mtlr r3
1094
1095 ld r1, 0(r1)
1096 ld r4, -32(r1)
1097 ld r3, -24(r1)
1098 ld r2, -16(r1)
1099 ld r31, -8(r1)
1100
1101 /* Jump back to real return address */
1102 blr
1103#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1104#endif /* CONFIG_FUNCTION_TRACER */