/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
22#include <asm/unistd.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/mmu.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100030#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000031#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100032#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100033#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053034#include <asm/ftrace.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100035
/*
 * System calls.
 */
	.section	".toc","aw"
/* TOC entry holding the address of the kernel system call table. */
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

/* Define SHOW_SYSCALLS to get per-syscall entry/exit debug output. */
#undef SHOW_SYSCALLS
/*
 * Common 64-bit system call entry point.
 *
 * On entry (from the system call exception prolog; _TRAP is set to
 * 0xc01 below):
 *   r0     = system call number
 *   r3-r8  = system call arguments
 *   r9     = caller's r13 (saved into pt_regs GPR13 below)
 *   r11    = SRR0 (return NIP), r12 = SRR1 (return MSR)
 *   r13    = PACA pointer
 *
 * Builds a pt_regs frame on the kernel stack, marks interrupts
 * soft- and hard-enabled in the PACA, hard-enables interrupts,
 * then dispatches through the system call table.
 */
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR		/* came from user mode? */
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f			/* from kernel: stay on this stack */
	ld	r1,PACAKSAVE(r13)	/* from user: switch to kernel stack */
1:	std	r10,0(r1)		/* back-chain to previous frame */
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)		/* r9-r12 were clobbered by the prolog; */
	std	r11,GPR10(r1)		/* store zeroes so we don't leak kernel */
	std	r11,GPR11(r1)		/* values into the frame */
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)		/* r9 holds the caller's r13 */
	mfcr	r9
	mflr	r10
	li	r11,0xc01		/* trap value: system call */
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)	/* keep arg0 for syscall restart */
	ld	r2,PACATOC(r13)		/* establish kernel TOC */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* The call above clobbers volatiles; reload the syscall args */
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	/* Mark interrupts soft- and hard-enabled in the PACA */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	/* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT	/* current_thread_info() */
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace		/* tracer wants to see this call */
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	/* truncate the six arguments to 32 bits for a 32-bit task */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4		/* 16 bytes per table slot (64/32-bit pair) */
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r10
	bctrl			/* Call handler */
168
/*
 * System call return path.  r3 holds the handler's return value.
 * Disables interrupts, checks for exit-time work (tracing, signals,
 * per-syscall flags), maps large-negative returns to an error CR0.SO
 * indication, and returns to the caller with RFI.
 */
syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT	/* current_thread_info() */

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif

	/* Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11			/* return value in errno range? */
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR		/* returning to user mode? */
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */
233
/*
 * Convert a -errno return value to the user-visible convention:
 * positive errno in r3 with CR0.SO set (saved CR in r5 from caller).
 */
syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3		/* r3 = errno (positive) */
	std	r5,_CCR(r1)
	b	syscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000239
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT	/* current_thread_info() */
	ld	r10,TI_FLAGS(r10)	/* reload flags the tracer may have changed */
	b	syscall_dotrace_cont
261
/* Syscall number out of range: fail with ENOSYS. */
syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit
265
/*
 * Slow syscall exit: one or more work flags were set.
 * On entry: r3 = syscall return value, r9 = TI_FLAGS,
 * r11 = -_LAST_ERRNO, r12 = thread_info pointer.
 */
syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 contains -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3		/* r3 = errno (positive) */
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
	/* atomic read-modify-write so we don't lose concurrent flag updates */
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000313
/* Save non-volatile GPRs, if not already saved.
 *
 * Bit 0 of the _TRAP word in the pt_regs frame acts as the
 * "non-volatiles not yet saved" marker: if it is already clear,
 * return immediately; otherwise save r14-r31 and clear the bit.
 */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-			/* bit 0 clear: already saved */
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear bit 0 to mark nvgprs saved */
	std	r0,_TRAP(r1)
	blr
323
David Woodhouse401d1f02005-11-15 18:52:18 +0000324
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

/* fork: save nvgprs into pt_regs, then call the C implementation. */
_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

/* vfork: as above. */
_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

/* clone: as above. */
_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

/* 32-bit compat swapcontext: needs full register state too. */
_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

/* 64-bit swapcontext: needs full register state too. */
_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

/*
 * First return of a newly created child task: finish the scheduler's
 * bookkeeping, restore the non-volatiles copied from the parent, and
 * return 0 from the fork/clone syscall in the child.
 */
_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0			/* child returns 0 */
	b	syscall_exit
364
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 *
 * Returns (in r3) the task_struct of the previous task.
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP	/* build mask of FP/vector MSR bits to drop */
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22	/* any of those facilities currently enabled? */
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)		/* turn them off for the outgoing task */
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f		/* no SLB: skip the bolting below */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
518
/*
 * Full exception return: restore the non-volatile GPRs (unless the
 * _TRAP low bit says they were never saved), then fall through to
 * ret_from_except_lite.
 */
	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite	/* nvgprs were never saved */
	REST_NVGPRS(r1)
525
/*
 * Light-weight exception return: volatile registers only.
 * Checks for pending work (reschedule/signals) before restoring
 * the interrupted context from the pt_regs frame and executing rfid.
 */
_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)	/* soft-enable state to restore */
FW_FTR_SECTION_ELSE
	b	.Liseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r0 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3	/* r3 still holds the saved MSR */

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */
638
/*
 * iSeries: before restoring, if we are soft-enabling, check the
 * hypervisor for any pending interrupts and deliver them via do_IRQ
 * before completing the return (branches back to label 2 in restore).
 */
.Liseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b			/* staying soft-disabled: just return */
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
#endif
662
/*
 * Work pending on exception return.  On entry: r3 = saved MSR,
 * r4 = TI_FLAGS, r9 = thread_info (PREEMPT), r10 = MSR with EE off.
 * Kernel-mode work is preemption; user-mode work is reschedule
 * and/or signal delivery.
 */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore		/* can't preempt: plain restore */

	/* Here we are preempting the current task.
	 *
	 * Ensure interrupts are soft-disabled. We also properly mark
	 * the PACA to reflect the fact that they are hard-disabled
	 * and trace the change
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	TRACE_DISABLE_INTS

	/* Call the scheduler with soft IRQs off */
1:	bl	.preempt_schedule_irq

	/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1	/* clear MSR_EE */
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	li	r0,0
	stb	r0,PACAHARDIRQEN(r13)

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */

	/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ori	r10,r10,MSR_EE	/* r10 = MSR with EE cleared, from above */
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

/* Saved MSR had RI clear: state is unrecoverable, warn forever. */
unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore
733
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1		/* clear MSR_EE */
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	/* r0 = current MSR minus EE/SE/BE/RI: the MSR to call RTAS with */
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	/* r6 = r0 minus SF/IR/DR/FE0/FE1/FP/RI: 32-bit real-mode MSR */
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid				/* enter RTAS in 32-bit real mode */
	b	.	/* prevent speculative execution */

/*
 * Return point from RTAS: running in real mode with relocation off.
 * Rebuild enough state (stack, MSR) from the PACA to rfid back to
 * rtas_restore_regs with translation on.
 */
_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG_PACA	/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4		/* get PC into LR without B-predict pollution */
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr   r6
	li	r0,MSR_RI
	andc	r6,r6,r0		/* clear RI around the SRR0/1 update */
	sync
	mtmsrd  r6

	ld	r1,PACAR1(r4)           /* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

/* Back in virtual mode: undo everything enter_rtas saved. */
_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG_PACA

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)	/* get return address */

	mtlr	r0
	blr		/* return to caller */

#endif /* CONFIG_PPC_RTAS */
874
Paul Mackerras9994a332005-10-10 22:36:14 +1000875_GLOBAL(enter_prom)
876 mflr r0
877 std r0,16(r1)
878 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
879
880 /* Because PROM is running in 32b mode, it clobbers the high order half
881 * of all registers that it saves. We therefore save those registers
882 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
883 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000884 SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000885 SAVE_GPR(13, r1)
886 SAVE_8GPRS(14, r1)
887 SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000888 mfcr r10
Paul Mackerras9994a332005-10-10 22:36:14 +1000889 mfmsr r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000890 std r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000891 std r11,_MSR(r1)
892
893 /* Get the PROM entrypoint */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000894 mtlr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000895
896 /* Switch MSR to 32 bits mode
897 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000898#ifdef CONFIG_PPC_BOOK3E
899 rlwinm r11,r11,0,1,31
900 mtmsr r11
901#else /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000902 mfmsr r11
903 li r12,1
904 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
905 andc r11,r11,r12
906 li r12,1
907 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
908 andc r11,r11,r12
909 mtmsrd r11
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000910#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000911 isync
912
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000913 /* Enter PROM here... */
Paul Mackerras9994a332005-10-10 22:36:14 +1000914 blrl
915
916 /* Just make sure that r1 top 32 bits didn't get
917 * corrupt by OF
918 */
919 rldicl r1,r1,0,32
920
921 /* Restore the MSR (back to 64 bits) */
922 ld r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000923 MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +1000924 isync
925
926 /* Restore other registers */
927 REST_GPR(2, r1)
928 REST_GPR(13, r1)
929 REST_8GPRS(14, r1)
930 REST_10GPRS(22, r1)
931 ld r4,_CCR(r1)
932 mtcr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000933
934 addi r1,r1,PROM_FRAME_SIZE
935 ld r0,16(r1)
936 mtlr r0
937 blr
Steven Rostedt4e491d12008-05-14 23:49:44 -0400938
Steven Rostedt606576c2008-10-06 19:06:12 -0400939#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -0400940#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * With DYNAMIC_FTRACE the compiler-inserted mcount calls are patched
 * at runtime (to nops, or to branches into ftrace_caller below), so
 * the default mcount/_mcount entry is just an immediate return.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr
944
/*
 * ftrace_caller: dynamic-ftrace trampoline.  On entry LR holds the
 * address just after the mcount call site in the traced function.
 * Builds a minimal frame, computes (r3 = call-site ip,
 * r4 = parent return address) and calls through the patchable
 * ftrace_call site.  The ftrace_call/ftrace_graph_call instructions
 * are rewritten at runtime — their bytes must not be changed here.
 */
_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3			/* r3 = return addr into traced function */
	ld	r11, 0(r1)		/* r11 = caller's back chain (its frame) */
	stdu	r1, -112(r1)		/* make our own minimal stack frame */
	std	r3, 128(r1)		/* stash traced-fn LR in old frame's LR slot */
	ld	r4, 16(r11)		/* r4 = parent ip (caller's saved LR) */
	subi	r3, r3, MCOUNT_INSN_SIZE /* r3 = address of the mcount call itself */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub		/* patched at runtime to the tracer */
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub	/* patched to ftrace_graph_caller when enabled */
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)		/* reload traced-fn LR */
	mtlr	r0
	addi	r1, r1, 112		/* pop our frame */
_GLOBAL(ftrace_stub)
	blr
968#else
/*
 * Non-dynamic ftrace: every traced function calls _mcount directly.
 * mcount is a no-op; _mcount loads the current tracer function
 * pointer from ftrace_trace_function and calls it with
 * (r3 = call-site ip, r4 = parent return address).
 */
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3			/* r3 = return addr into traced function */
	ld	r11, 0(r1)		/* r11 = caller's back chain */
	stdu	r1, -112(r1)		/* minimal stack frame */
	std	r3, 128(r1)		/* stash traced-fn LR in old frame's LR slot */
	ld	r4, 16(r11)		/* r4 = parent ip (caller's saved LR) */

	subi	r3, r3, MCOUNT_INSN_SIZE /* r3 = address of the mcount call itself */
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)		/* dereference the function-pointer variable */
	ld	r5,0(r5)		/* ... and its function descriptor (ABIv1) */
	mtctr	r5
	bctrl				/* call the registered tracer */
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller	/* graph caller pops our frame and returns */
#endif
	ld	r0, 128(r1)		/* reload traced-fn LR */
	mtlr	r0
	addi	r1, r1, 112		/* pop our frame */
_GLOBAL(ftrace_stub)
	blr
997
Steven Rostedt6794c782009-02-09 21:10:27 -0800998#endif /* CONFIG_DYNAMIC_FTRACE */
999
1000#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Steven Rostedt46542882009-02-10 22:19:54 -08001001_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -08001002 /* load r4 with local address */
1003 ld r4, 128(r1)
1004 subi r4, r4, MCOUNT_INSN_SIZE
1005
1006 /* get the parent address */
1007 ld r11, 112(r1)
1008 addi r3, r11, 16
1009
1010 bl .prepare_ftrace_return
1011 nop
1012
1013 ld r0, 128(r1)
1014 mtlr r0
1015 addi r1, r1, 112
1016 blr
1017
/*
 * return_to_handler: functions hooked by the graph tracer "return"
 * here instead of to their real caller.  Save the (possibly live)
 * return-value registers, ask ftrace_return_to_handler for the real
 * return address, then jump to it.  Values are saved *below* r1 and
 * reloaded relative to the frame restored via the back chain.
 * (No TOC save here: this path runs with the core kernel TOC already
 * in r2 — modules use mod_return_to_handler below.)
 */
_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1			/* keep old SP; NOTE(review): r31 itself
					 * is reloaded from the back chain below */
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)		/* pop frame via back chain */
	ld	r4,  -24(r1)		/* restore original return values */
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
1039
/*
 * mod_return_to_handler: same as return_to_handler, but for traced
 * functions living in modules.  A module runs with its own TOC in r2,
 * so r2 is additionally saved and switched to the core kernel TOC
 * (from the PACA) before calling into ftrace_return_to_handler.
 */
_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)		/* module TOC, restored below */
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)		/* pop frame via back chain */
	ld	r4,  -32(r1)		/* restore original return values */
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)		/* back to the module's TOC */
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
1070#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1071#endif /* CONFIG_FUNCTION_TRACER */