blob: 0c3764ba8d49728b9ae9c80659ce312fce0eb08f [file] [log] [blame]
Paul Mackerras9994a332005-10-10 22:36:14 +10001/*
Paul Mackerras9994a332005-10-10 22:36:14 +10002 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
22#include <asm/unistd.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/mmu.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100030#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000031#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100032#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100033#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053034#include <asm/ftrace.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100035
 36/*
 37 * System calls.
 38 */
 39 .section ".toc","aw"
/* TOC entry holding the address of sys_call_table; the dispatch code in
 * system_call below loads it via .SYS_CALL_TABLE@toc(2) and adds 8 to
 * select the 32-bit entry of each 16-byte table slot. */
 40.SYS_CALL_TABLE:
 41 .tc .sys_call_table[TC],.sys_call_table
 42
 43/* This value is used to mark exception frames on the stack. */
/* Stored at -16 from STACK_FRAME_OVERHEAD in each interrupt frame (the
 * "regshere" marker) so stack walkers can recognise register frames. */
 44exception_marker:
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100045 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
Paul Mackerras9994a332005-10-10 22:36:14 +100046
 47 .section ".text"
 48 .align 7
 49
50#undef SHOW_SYSCALLS
51
/*
 * System call entry point.
 * On entry (presumably set up by the exception prolog -- confirm against
 * the vector code, which is outside this file view):
 *   r0       = syscall number, r3-r8 = syscall arguments
 *   r11      = caller NIP, r12 = caller MSR, r9 = caller's r13
 *   r13      = PACA pointer (used for PACAKSAVE/PACATOC below)
 * Builds an INT_FRAME on the kernel stack (switching stacks via
 * PACAKSAVE when MSR_PR says we came from user mode), marks the frame
 * with trap 0xc01, re-enables interrupts, then dispatches through
 * .SYS_CALL_TABLE indexed by r0.
 */
 52 .globl system_call_common
 53system_call_common:
/* CR0.EQ set here iff we came from kernel mode (MSR_PR clear). */
 54 andi. r10,r12,MSR_PR
 55 mr r10,r1
 56 addi r1,r1,-INT_FRAME_SIZE
 57 beq- 1f
 58 ld r1,PACAKSAVE(r13)
 591: std r10,0(r1)
 60 std r11,_NIP(r1)
 61 std r12,_MSR(r1)
 62 std r0,GPR0(r1)
 63 std r10,GPR1(r1)
Paul Mackerrasc6622f62006-02-24 10:06:59 +110064 ACCOUNT_CPU_USER_ENTRY(r10, r11)
Paul Mackerrasab598b62008-11-30 11:45 +000065 /*
 66 * This "crclr so" clears CR0.SO, which is the error indication on
 67 * return from this system call. There must be no cmp instruction
 68 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
 69 * CR0.SO will get set, causing all system calls to appear to fail.
 70 */
 71 crclr so
Paul Mackerras9994a332005-10-10 22:36:14 +100072 std r2,GPR2(r1)
 73 std r3,GPR3(r1)
 74 std r4,GPR4(r1)
 75 std r5,GPR5(r1)
 76 std r6,GPR6(r1)
 77 std r7,GPR7(r1)
 78 std r8,GPR8(r1)
/* r9-r12 were consumed by the entry path; store zeros in their slots. */
 79 li r11,0
 80 std r11,GPR9(r1)
 81 std r11,GPR10(r1)
 82 std r11,GPR11(r1)
 83 std r11,GPR12(r1)
 84 std r9,GPR13(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100085 mfcr r9
 86 mflr r10
 87 li r11,0xc01
 88 std r9,_CCR(r1)
 89 std r10,_LINK(r1)
 90 std r11,_TRAP(r1)
 91 mfxer r9
 92 mfctr r10
 93 std r9,_XER(r1)
 94 std r10,_CTR(r1)
/* Keep a pristine copy of arg0 for syscall restart / tracing. */
 95 std r3,ORIG_GPR3(r1)
 96 ld r2,PACATOC(r13)
 97 addi r9,r1,STACK_FRAME_OVERHEAD
 98 ld r11,exception_marker@toc(r2)
 99 std r11,-16(r9) /* "regshere" marker */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000100#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
 101BEGIN_FW_FTR_SECTION
/* CR0.EQ still reflects the MSR_PR test above: skip DTL scan if from kernel. */
 102 beq 33f
 103 /* if from user, see if there are any DTL entries to process */
 104 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
 105 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
 106 ld r10,LPPACA_DTLIDX(r10) /* get log write index */
 107 cmpd cr1,r11,r10
 108 beq+ cr1,33f
 109 bl .accumulate_stolen_time
/* The C call clobbered the argument registers; reload them from the frame. */
 110 REST_GPR(0,r1)
 111 REST_4GPRS(3,r1)
 112 REST_2GPRS(7,r1)
 113 addi r9,r1,STACK_FRAME_OVERHEAD
 11433:
 115END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 116#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
 117
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +1000118#ifdef CONFIG_TRACE_IRQFLAGS
 119 bl .trace_hardirqs_on
 120 REST_GPR(0,r1)
 121 REST_4GPRS(3,r1)
 122 REST_2GPRS(7,r1)
 123 addi r9,r1,STACK_FRAME_OVERHEAD
 124 ld r12,_MSR(r1)
 125#endif /* CONFIG_TRACE_IRQFLAGS */
/* Mark interrupts soft- and hard-enabled in the PACA and the frame. */
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000126 li r10,1
 127 stb r10,PACASOFTIRQEN(r13)
 128 stb r10,PACAHARDIRQEN(r13)
 129 std r10,SOFTE(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000130
 131 /* Hard enable interrupts */
 132#ifdef CONFIG_PPC_BOOK3E
 133 wrteei 1
 134#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000135 mfmsr r11
 136 ori r11,r11,MSR_EE
 137 mtmsrd r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000138#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000139
 140#ifdef SHOW_SYSCALLS
 141 bl .do_show_syscall
 142 REST_GPR(0,r1)
 143 REST_4GPRS(3,r1)
 144 REST_2GPRS(7,r1)
 145 addi r9,r1,STACK_FRAME_OVERHEAD
 146#endif
/* r11 = current_thread_info() (thread_info lives at the stack base). */
 147 clrrdi r11,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000148 ld r10,TI_FLAGS(r11)
Paul Mackerras9994a332005-10-10 22:36:14 +1000149 andi. r11,r10,_TIF_SYSCALL_T_OR_A
 150 bne- syscall_dotrace
 151syscall_dotrace_cont:
 152 cmpldi 0,r0,NR_syscalls
 153 bge- syscall_enosys
 154
 155system_call: /* label this so stack traces look sane */
 156/*
 157 * Need to vector to 32 Bit or default sys_call_table here,
 158 * based on caller's run-mode / personality.
 159 */
 160 ld r11,.SYS_CALL_TABLE@toc(2)
 161 andi. r10,r10,_TIF_32BIT
 162 beq 15f
 163 addi r11,r11,8 /* use 32-bit syscall entries */
/* 32-bit task: truncate the six argument registers to 32 bits. */
 164 clrldi r3,r3,32
 165 clrldi r4,r4,32
 166 clrldi r5,r5,32
 167 clrldi r6,r6,32
 168 clrldi r7,r7,32
 169 clrldi r8,r8,32
 17015:
/* Each table slot is 16 bytes (64-bit entry + 32-bit entry at +8). */
 171 slwi r0,r0,4
 172 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
 173 mtctr r10
 174 bctrl /* Call handler */
175
/*
 * Common system call return path.  r3 = handler return value.
 * Fast path: no exit work flags set -> map error returns (value in the
 * [-_LAST_ERRNO, -1] range) to positive errno + CR0.SO, restore the
 * caller's context from the frame and RFI back.
 */
 176syscall_exit:
Paul Mackerras9994a332005-10-10 22:36:14 +1000177 std r3,RESULT(r1)
David Woodhouse401d1f02005-11-15 18:52:18 +0000178#ifdef SHOW_SYSCALLS
 179 bl .do_show_syscall_exit
 180 ld r3,RESULT(r1)
 181#endif
/* r12 = current_thread_info(), kept live for the TI_FLAGS load below. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000182 clrrdi r12,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000183
Paul Mackerras9994a332005-10-10 22:36:14 +1000184 ld r8,_MSR(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000185#ifdef CONFIG_PPC_BOOK3S
 186 /* No MSR:RI on BookE */
Paul Mackerras9994a332005-10-10 22:36:14 +1000187 andi. r10,r8,MSR_RI
 188 beq- unrecov_restore
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000189#endif
 190
 191 /* Disable interrupts so current_thread_info()->flags can't change,
 192 * and so that we don't get interrupted after loading SRR0/1.
 193 */
 194#ifdef CONFIG_PPC_BOOK3E
 195 wrteei 0
 196#else
/* rldicl/rotldi pair clears only MSR_EE (bit 48) without touching the rest. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000197 mfmsr r10
 198 rldicl r10,r10,48,1
 199 rotldi r10,r10,16
 200 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000201#endif /* CONFIG_PPC_BOOK3E */
 202
Paul Mackerras9994a332005-10-10 22:36:14 +1000203 ld r9,TI_FLAGS(r12)
David Woodhouse401d1f02005-11-15 18:52:18 +0000204 li r11,-_LAST_ERRNO
Paul Mackerras1bd79332006-03-08 13:24:22 +1100205 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
Paul Mackerras9994a332005-10-10 22:36:14 +1000206 bne- syscall_exit_work
/* Unsigned compare: r3 >= -_LAST_ERRNO means an error return. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000207 cmpld r3,r11
 208 ld r5,_CCR(r1)
 209 bge- syscall_error
 210syscall_error_cont:
Paul Mackerras9994a332005-10-10 22:36:14 +1000211 ld r7,_NIP(r1)
Anton Blanchardf89451f2010-08-11 01:40:27 +0000212BEGIN_FTR_SECTION
Paul Mackerras9994a332005-10-10 22:36:14 +1000213 stdcx. r0,0,r1 /* to clear the reservation */
Anton Blanchardf89451f2010-08-11 01:40:27 +0000214END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
Paul Mackerras9994a332005-10-10 22:36:14 +1000215 andi. r6,r8,MSR_PR
 216 ld r4,_LINK(r1)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100217 /*
 218 * Clear RI before restoring r13. If we are returning to
 219 * userspace and we take an exception after restoring r13,
 220 * we end up corrupting the userspace r13 value.
 221 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000222#ifdef CONFIG_PPC_BOOK3S
 223 /* No MSR:RI on BookE */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100224 li r12,MSR_RI
 225 andc r11,r10,r12
 226 mtmsrd r11,1 /* clear MSR.RI */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000227#endif /* CONFIG_PPC_BOOK3S */
 228
/* CR0.EQ from the MSR_PR test above: skip r13 restore for kernel return. */
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100229 beq- 1f
 230 ACCOUNT_CPU_USER_EXIT(r11, r12)
 231 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
Paul Mackerras9994a332005-10-10 22:36:14 +10002321: ld r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000233 ld r1,GPR1(r1)
 234 mtlr r4
 235 mtcr r5
 236 mtspr SPRN_SRR0,r7
 237 mtspr SPRN_SRR1,r8
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000238 RFI
Paul Mackerras9994a332005-10-10 22:36:14 +1000239 b . /* prevent speculative execution */
 240
/* Error path: negate r3 into a positive errno and set CR0.SO in the
 * saved CR image (r5) so userspace sees the failure indication. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000241syscall_error:
Paul Mackerras9994a332005-10-10 22:36:14 +1000242 oris r5,r5,0x1000 /* Set SO bit in CR */
David Woodhouse401d1f02005-11-15 18:52:18 +0000243 neg r3,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000244 std r5,_CCR(r1)
 245 b syscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000246
Paul Mackerras9994a332005-10-10 22:36:14 +1000247/* Traced system call support */
/* Entered from system_call_common when _TIF_SYSCALL_T_OR_A is set:
 * saves the non-volatiles, lets the tracer see/modify the registers,
 * then reloads the (possibly changed) arguments and rejoins the
 * normal dispatch at syscall_dotrace_cont. */
 248syscall_dotrace:
 249 bl .save_nvgprs
 250 addi r3,r1,STACK_FRAME_OVERHEAD
 251 bl .do_syscall_trace_enter
Roland McGrath4f72c422008-07-27 16:51:03 +1000252 /*
 253 * Restore argument registers possibly just changed.
 254 * We use the return value of do_syscall_trace_enter
 255 * for the call number to look up in the table (r0).
 256 */
 257 mr r0,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000258 ld r3,GPR3(r1)
 259 ld r4,GPR4(r1)
 260 ld r5,GPR5(r1)
 261 ld r6,GPR6(r1)
 262 ld r7,GPR7(r1)
 263 ld r8,GPR8(r1)
 264 addi r9,r1,STACK_FRAME_OVERHEAD
/* Re-read TI_FLAGS into r10 -- the dispatch code expects it there. */
 265 clrrdi r10,r1,THREAD_SHIFT
 266 ld r10,TI_FLAGS(r10)
 267 b syscall_dotrace_cont
 268
/* Out-of-range syscall number: fail with -ENOSYS via the normal exit. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000269syscall_enosys:
 270 li r3,-ENOSYS
 271 b syscall_exit
272
/*
 * Slow syscall exit: one or more work flags were set in TI_FLAGS (r9).
 * r3 = syscall result, r11 = -_LAST_ERRNO, r12 = current_thread_info().
 * Handles TIF_RESTOREALL / TIF_NOERROR, clears the per-syscall flags
 * atomically, and runs syscall exit tracing if requested.
 */
 273syscall_exit_work:
 274 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
 275 If TIF_NOERROR is set, just save r3 as it is. */
 276
 277 andi. r0,r9,_TIF_RESTOREALL
Paul Mackerras1bd79332006-03-08 13:24:22 +1100278 beq+ 0f
 279 REST_NVGPRS(r1)
 280 b 2f
 2810: cmpld r3,r11 /* r10 is -LAST_ERRNO */
David Woodhouse401d1f02005-11-15 18:52:18 +0000282 blt+ 1f
 283 andi. r0,r9,_TIF_NOERROR
 284 bne- 1f
 285 ld r5,_CCR(r1)
 286 neg r3,r3
 287 oris r5,r5,0x1000 /* Set SO bit in CR */
 288 std r5,_CCR(r1)
 2891: std r3,GPR3(r1)
 2902: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
 291 beq 4f
 292
Paul Mackerras1bd79332006-03-08 13:24:22 +1100293 /* Clear per-syscall TIF flags if any are set. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000294
/* ldarx/stdcx. loop: atomic read-modify-write of thread_info->flags,
 * since other code may update the flags word concurrently. */
 295 li r11,_TIF_PERSYSCALL_MASK
 296 addi r12,r12,TI_FLAGS
 2973: ldarx r10,0,r12
 298 andc r10,r10,r11
 299 stdcx. r10,0,r12
 300 bne- 3b
 301 subi r12,r12,TI_FLAGS
Paul Mackerras1bd79332006-03-08 13:24:22 +1100302
 3034: /* Anything else left to do? */
 304 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
David Woodhouse401d1f02005-11-15 18:52:18 +0000305 beq .ret_from_except_lite
 306
 307 /* Re-enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000308#ifdef CONFIG_PPC_BOOK3E
 309 wrteei 1
 310#else
David Woodhouse401d1f02005-11-15 18:52:18 +0000311 mfmsr r10
 312 ori r10,r10,MSR_EE
 313 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000314#endif /* CONFIG_PPC_BOOK3E */
David Woodhouse401d1f02005-11-15 18:52:18 +0000315
Paul Mackerras1bd79332006-03-08 13:24:22 +1100316 bl .save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000317 addi r3,r1,STACK_FRAME_OVERHEAD
 318 bl .do_syscall_trace_leave
Paul Mackerras1bd79332006-03-08 13:24:22 +1100319 b .ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000320
 321/* Save non-volatile GPRs, if not already saved. */
/* Bit 0 of the saved _TRAP word tracks whether r14-r31 are already in
 * the frame: set = not yet saved.  After saving, the bit is cleared so
 * a second call is a cheap no-op (beqlr-). */
 322_GLOBAL(save_nvgprs)
 323 ld r11,_TRAP(r1)
 324 andi. r0,r11,1
 325 beqlr-
 326 SAVE_NVGPRS(r1)
 327 clrrdi r0,r11,1
 328 std r0,_TRAP(r1)
 329 blr
330
David Woodhouse401d1f02005-11-15 18:52:18 +0000331
Paul Mackerras9994a332005-10-10 22:36:14 +1000332/*
 333 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 334 * and thus put the process into the stopped state where we might
 335 * want to examine its user state with ptrace. Therefore we need
 336 * to save all the nonvolatile registers (r14 - r31) before calling
 337 * the C code. Similarly, fork, vfork and clone need the full
 338 * register state on the stack so that it can be copied to the child.
 339 */
/* Each wrapper below: save r14-r31 into the frame, call the C syscall
 * implementation, then join the common syscall_exit path. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000340
 341_GLOBAL(ppc_fork)
 342 bl .save_nvgprs
 343 bl .sys_fork
 344 b syscall_exit
 345
 346_GLOBAL(ppc_vfork)
 347 bl .save_nvgprs
 348 bl .sys_vfork
 349 b syscall_exit
 350
 351_GLOBAL(ppc_clone)
 352 bl .save_nvgprs
 353 bl .sys_clone
 354 b syscall_exit
 355
Paul Mackerras1bd79332006-03-08 13:24:22 +1100356_GLOBAL(ppc32_swapcontext)
 357 bl .save_nvgprs
 358 bl .compat_sys_swapcontext
 359 b syscall_exit
 360
 361_GLOBAL(ppc64_swapcontext)
 362 bl .save_nvgprs
 363 bl .sys_swapcontext
 364 b syscall_exit
 365
/* First code run by a new task: finish the scheduler handoff, restore
 * the non-volatiles copied from the parent, and return 0 (child's
 * fork/clone return value) through syscall_exit. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000366_GLOBAL(ret_from_fork)
 367 bl .schedule_tail
 368 REST_NVGPRS(r1)
 369 li r3,0
 370 b syscall_exit
371
372/*
373 * This routine switches between two different tasks. The process
374 * state of one is saved on its kernel stack. Then the state
375 * of the other is restored from its kernel stack. The memory
376 * management hardware is updated to the second process's state.
377 * Finally, we can return to the second process, via ret_from_except.
378 * On entry, r3 points to the THREAD for the current task, r4
379 * points to the THREAD for the new task.
380 *
381 * Note: there are two ways to get to the "going out" portion
382 * of this code; either by coming in via the entry (_switch)
383 * or via "fork" which must set up an environment equivalent
384 * to the "_switch" path. If you change this you'll have to change
385 * the fork code also.
386 *
387 * The code which creates the new task context is in 'copy_thread'
Jon Mason2ef94812006-01-23 10:58:20 -0600388 * in arch/powerpc/kernel/process.c
Paul Mackerras9994a332005-10-10 22:36:14 +1000389 */
390 .align 7
391_GLOBAL(_switch)
392 mflr r0
393 std r0,16(r1)
394 stdu r1,-SWITCH_FRAME_SIZE(r1)
395 /* r3-r13 are caller saved -- Cort */
396 SAVE_8GPRS(14, r1)
397 SAVE_10GPRS(22, r1)
398 mflr r20 /* Return to switch caller */
399 mfmsr r22
400 li r0, MSR_FP
Michael Neulingce48b212008-06-25 14:07:18 +1000401#ifdef CONFIG_VSX
402BEGIN_FTR_SECTION
403 oris r0,r0,MSR_VSX@h /* Disable VSX */
404END_FTR_SECTION_IFSET(CPU_FTR_VSX)
405#endif /* CONFIG_VSX */
Paul Mackerras9994a332005-10-10 22:36:14 +1000406#ifdef CONFIG_ALTIVEC
407BEGIN_FTR_SECTION
408 oris r0,r0,MSR_VEC@h /* Disable altivec */
409 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
410 std r24,THREAD_VRSAVE(r3)
411END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
412#endif /* CONFIG_ALTIVEC */
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000413#ifdef CONFIG_PPC64
414BEGIN_FTR_SECTION
415 mfspr r25,SPRN_DSCR
416 std r25,THREAD_DSCR(r3)
417END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
418#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000419 and. r0,r0,r22
420 beq+ 1f
421 andc r22,r22,r0
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000422 MTMSRD(r22)
Paul Mackerras9994a332005-10-10 22:36:14 +1000423 isync
4241: std r20,_NIP(r1)
425 mfcr r23
426 std r23,_CCR(r1)
427 std r1,KSP(r3) /* Set old stack pointer */
428
429#ifdef CONFIG_SMP
430 /* We need a sync somewhere here to make sure that if the
431 * previous task gets rescheduled on another CPU, it sees all
432 * stores it has performed on this one.
433 */
434 sync
435#endif /* CONFIG_SMP */
436
Anton Blanchardf89451f2010-08-11 01:40:27 +0000437 /*
438 * If we optimise away the clear of the reservation in system
439 * calls because we know the CPU tracks the address of the
440 * reservation, then we need to clear it here to cover the
441 * case that the kernel context switch path has no larx
442 * instructions.
443 */
444BEGIN_FTR_SECTION
445 ldarx r6,0,r1
446END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
447
Paul Mackerras9994a332005-10-10 22:36:14 +1000448 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
449 std r6,PACACURRENT(r13) /* Set new 'current' */
450
451 ld r8,KSP(r4) /* new stack pointer */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000452#ifdef CONFIG_PPC_BOOK3S
Paul Mackerras9994a332005-10-10 22:36:14 +1000453BEGIN_FTR_SECTION
Michael Ellermanc2303282008-06-24 11:33:05 +1000454 BEGIN_FTR_SECTION_NESTED(95)
Paul Mackerras9994a332005-10-10 22:36:14 +1000455 clrrdi r6,r8,28 /* get its ESID */
456 clrrdi r9,r1,28 /* get current sp ESID */
Michael Ellermanc2303282008-06-24 11:33:05 +1000457 FTR_SECTION_ELSE_NESTED(95)
Paul Mackerras1189be62007-10-11 20:37:10 +1000458 clrrdi r6,r8,40 /* get its 1T ESID */
459 clrrdi r9,r1,40 /* get current sp 1T ESID */
Matt Evans44ae3ab2011-04-06 19:48:50 +0000460 ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
Michael Ellermanc2303282008-06-24 11:33:05 +1000461FTR_SECTION_ELSE
462 b 2f
Matt Evans44ae3ab2011-04-06 19:48:50 +0000463ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
Paul Mackerras9994a332005-10-10 22:36:14 +1000464 clrldi. r0,r6,2 /* is new ESID c00000000? */
465 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
466 cror eq,4*cr1+eq,eq
467 beq 2f /* if yes, don't slbie it */
468
469 /* Bolt in the new stack SLB entry */
470 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
471 oris r0,r6,(SLB_ESID_V)@h
472 ori r0,r0,(SLB_NUM_BOLTED-1)@l
Paul Mackerras1189be62007-10-11 20:37:10 +1000473BEGIN_FTR_SECTION
474 li r9,MMU_SEGSIZE_1T /* insert B field */
475 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
476 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
Matt Evans44ae3ab2011-04-06 19:48:50 +0000477END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
Michael Neuling2f6093c2006-08-07 16:19:19 +1000478
Michael Neuling00efee72007-08-24 16:58:37 +1000479 /* Update the last bolted SLB. No write barriers are needed
480 * here, provided we only update the current CPU's SLB shadow
481 * buffer.
482 */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000483 ld r9,PACA_SLBSHADOWPTR(r13)
Michael Neuling11a27ad2006-08-09 17:00:30 +1000484 li r12,0
485 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
486 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
487 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000488
Matt Evans44ae3ab2011-04-06 19:48:50 +0000489 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
Olof Johanssonf66bce52007-10-16 00:58:59 +1000490 * we have 1TB segments, the only CPUs known to have the errata
491 * only support less than 1TB of system memory and we'll never
492 * actually hit this code path.
493 */
494
Paul Mackerras9994a332005-10-10 22:36:14 +1000495 slbie r6
496 slbie r6 /* Workaround POWER5 < DD2.1 issue */
497 slbmte r7,r0
498 isync
Paul Mackerras9994a332005-10-10 22:36:14 +10004992:
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000500#endif /* !CONFIG_PPC_BOOK3S */
501
Paul Mackerras9994a332005-10-10 22:36:14 +1000502 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
503 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
504 because we don't need to leave the 288-byte ABI gap at the
505 top of the kernel stack. */
506 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
507
508 mr r1,r8 /* start using new stack pointer */
509 std r7,PACAKSAVE(r13)
510
511 ld r6,_CCR(r1)
512 mtcrf 0xFF,r6
513
514#ifdef CONFIG_ALTIVEC
515BEGIN_FTR_SECTION
516 ld r0,THREAD_VRSAVE(r4)
517 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
518END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
519#endif /* CONFIG_ALTIVEC */
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000520#ifdef CONFIG_PPC64
521BEGIN_FTR_SECTION
522 ld r0,THREAD_DSCR(r4)
523 cmpd r0,r25
524 beq 1f
525 mtspr SPRN_DSCR,r0
5261:
527END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
528#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000529
530 /* r3-r13 are destroyed -- Cort */
531 REST_8GPRS(14, r1)
532 REST_10GPRS(22, r1)
533
534 /* convert old thread to its task_struct for return value */
535 addi r3,r3,-THREAD
536 ld r7,_NIP(r1) /* Return to _switch caller in new task */
537 mtlr r7
538 addi r1,r1,SWITCH_FRAME_SIZE
539 blr
540
/* Full exception return: restore the non-volatiles if _TRAP bit 0 says
 * they were saved in this frame, then fall through to the lite path. */
 541 .align 7
 542_GLOBAL(ret_from_except)
 543 ld r11,_TRAP(r1)
 544 andi. r0,r11,1
 545 bne .ret_from_except_lite
 546 REST_NVGPRS(r1)
547
/* Common exception/interrupt return: with interrupts disabled, decide
 * whether pending work (reschedule / signals) must run before the
 * registers are restored; otherwise fall through to "restore". */
 548_GLOBAL(ret_from_except_lite)
 549 /*
 550 * Disable interrupts so that current_thread_info()->flags
 551 * can't change between when we test it and when we return
 552 * from the interrupt.
 553 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000554#ifdef CONFIG_PPC_BOOK3E
 555 wrteei 0
 556#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000557 mfmsr r10 /* Get current interrupt state */
 558 rldicl r9,r10,48,1 /* clear MSR_EE */
 559 rotldi r9,r9,16
 560 mtmsrd r9,1 /* Update machine state */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000561#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000562
/* Preemptible kernels also check NEED_RESCHED for kernel-mode returns;
 * SIGPENDING is only tested when returning to user mode (MSR_PR). */
 563#ifdef CONFIG_PREEMPT
 564 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
 565 li r0,_TIF_NEED_RESCHED /* bits to check */
 566 ld r3,_MSR(r1)
 567 ld r4,TI_FLAGS(r9)
 568 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
 569 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
 570 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
 571 bne do_work
 572
 573#else /* !CONFIG_PREEMPT */
 574 ld r3,_MSR(r1) /* Returning to user mode? */
 575 andi. r3,r3,MSR_PR
 576 beq restore /* if not, just restore regs and return */
 577
 578 /* Check current_thread_info()->flags */
 579 clrrdi r9,r1,THREAD_SHIFT
 580 ld r4,TI_FLAGS(r9)
 581 andi. r0,r4,_TIF_USER_WORK_MASK
 582 bne do_work
Benjamin Herrenschmidt4f8cf362012-02-28 13:44:58 +1100583#endif /* !CONFIG_PREEMPT */
Paul Mackerras9994a332005-10-10 22:36:14 +1000584
/*
 * Restore the interrupted context from the frame and return via rfid.
 * Entered with interrupts hard-disabled.  r3 holds the saved MSR for
 * most of this path; BookE diverts to its own exception return code.
 */
 585restore:
Michael Ellerman01f38802008-07-16 14:21:34 +1000586 ld r5,SOFTE(r1)
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +1000587 TRACE_AND_RESTORE_IRQ(r5);
Paul Mackerras9994a332005-10-10 22:36:14 +1000588
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000589 /* extract EE bit and use it to restore paca->hard_enabled */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100590 ld r3,_MSR(r1)
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000591 rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
 592 stb r4,PACAHARDIRQEN(r13)
 593
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000594#ifdef CONFIG_PPC_BOOK3E
 595 b .exception_return_book3e
 596#else
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100597 ld r4,_CTR(r1)
 598 ld r0,_LINK(r1)
 599 mtctr r4
 600 mtlr r0
 601 ld r4,_XER(r1)
 602 mtspr SPRN_XER,r4
 603
 604 REST_8GPRS(5, r1)
 605
/* If the interrupted context had RI clear, SRR0/1 were not recoverable. */
 606 andi. r0,r3,MSR_RI
 607 beq- unrecov_restore
 608
Anton Blanchardf89451f2010-08-11 01:40:27 +0000609 /*
 610 * Clear the reservation. If we know the CPU tracks the address of
 611 * the reservation then we can potentially save some cycles and use
 612 * a larx. On POWER6 and POWER7 this is significantly faster.
 613 */
 614BEGIN_FTR_SECTION
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100615 stdcx. r0,0,r1 /* to clear the reservation */
Anton Blanchardf89451f2010-08-11 01:40:27 +0000616FTR_SECTION_ELSE
 617 ldarx r4,0,r1
 618ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100619
 620 /*
 621 * Clear RI before restoring r13. If we are returning to
 622 * userspace and we take an exception after restoring r13,
 623 * we end up corrupting the userspace r13 value.
 624 */
 625 mfmsr r4
 626 andc r4,r4,r0 /* r0 contains MSR_RI here */
 627 mtmsrd r4,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000628
 629 /*
 630 * r13 is our per cpu area, only restore it if we are returning to
 631 * userspace
 632 */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100633 andi. r0,r3,MSR_PR
Paul Mackerras9994a332005-10-10 22:36:14 +1000634 beq 1f
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100635 ACCOUNT_CPU_USER_EXIT(r2, r4)
Paul Mackerras9994a332005-10-10 22:36:14 +1000636 REST_GPR(13, r1)
 6371:
/* r3 still holds the saved MSR: it becomes SRR1 for the rfid below. */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100638 mtspr SPRN_SRR1,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000639
 640 ld r2,_CCR(r1)
 641 mtcrf 0xFF,r2
 642 ld r2,_NIP(r1)
 643 mtspr SPRN_SRR0,r2
 644
 645 ld r0,GPR0(r1)
 646 ld r2,GPR2(r1)
 647 ld r3,GPR3(r1)
 648 ld r4,GPR4(r1)
 649 ld r1,GPR1(r1)
 650
 651 rfid
 652 b . /* prevent speculative execution */
 653
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000654#endif /* CONFIG_PPC_BOOK3E */
655
/*
 * Pending-work path out of ret_from_except_lite.
 * Kernel-mode return (CONFIG_PREEMPT only): preempt the current task
 * via preempt_schedule_irq.  User-mode return: run the scheduler or
 * signal/notify handling, then retry the exception return.
 * On entry: r3 = saved MSR, r4 = TI_FLAGS, r9 = thread_info,
 * r10 = MSR with EE cleared (from ret_from_except_lite).
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000656do_work:
 657#ifdef CONFIG_PREEMPT
 658 andi. r0,r3,MSR_PR /* Returning to user mode? */
 659 bne user_work
 660 /* Check that preempt_count() == 0 and interrupts are enabled */
 661 lwz r8,TI_PREEMPT(r9)
 662 cmpwi cr1,r8,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000663 ld r0,SOFTE(r1)
 664 cmpdi r0,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000665 crandc eq,cr1*4+eq,eq
 666 bne restore
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000667
 668 /* Here we are preempting the current task.
 669 *
 670 * Ensure interrupts are soft-disabled. We also properly mark
 671 * the PACA to reflect the fact that they are hard-disabled
 672 * and trace the change
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +1000673 */
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000674 li r0,0
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000675 stb r0,PACASOFTIRQEN(r13)
 676 stb r0,PACAHARDIRQEN(r13)
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000677 TRACE_DISABLE_INTS
 678
 679 /* Call the scheduler with soft IRQs off */
 6801: bl .preempt_schedule_irq
 681
 682 /* Hard-disable interrupts again (and update PACA) */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000683#ifdef CONFIG_PPC_BOOK3E
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000684 wrteei 0
 685#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000686 mfmsr r10
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000687 rldicl r10,r10,48,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000688 rotldi r10,r10,16
 689 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000690#endif /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000691 li r0,0
 692 stb r0,PACAHARDIRQEN(r13)
 693
 694 /* Re-test flags and eventually loop */
 695 clrrdi r9,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000696 ld r4,TI_FLAGS(r9)
 697 andi. r0,r4,_TIF_NEED_RESCHED
 698 bne 1b
 699 b restore
 700
 701user_work:
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000702#endif /* CONFIG_PREEMPT */
 703
Paul Mackerras9994a332005-10-10 22:36:14 +1000704 /* Enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000705#ifdef CONFIG_PPC_BOOK3E
 706 wrteei 1
 707#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000708 ori r10,r10,MSR_EE
 709 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000710#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000711
 712 andi. r0,r4,_TIF_NEED_RESCHED
 713 beq 1f
 714 li r5,1
 715 TRACE_AND_RESTORE_IRQ(r5);
Paul Mackerras9994a332005-10-10 22:36:14 +1000716 bl .schedule
 717 b .ret_from_except_lite
 718
/* Not a reschedule: save the non-volatiles (signal handling may ptrace
 * them) and deliver signals/notifications. */
 7191: bl .save_nvgprs
Benjamin Herrenschmidt18b246f2012-02-22 16:48:32 +1100720 li r5,1
 721 TRACE_AND_RESTORE_IRQ(r5);
Roland McGrath7d6d6372008-07-27 16:52:52 +1000722 addi r3,r1,STACK_FRAME_OVERHEAD
Benjamin Herrenschmidt18b246f2012-02-22 16:48:32 +1100723 bl .do_notify_resume
Paul Mackerras9994a332005-10-10 22:36:14 +1000724 b .ret_from_except
 725
/* Unrecoverable state (MSR_RI was clear): report and spin. */
 726unrecov_restore:
 727 addi r3,r1,STACK_FRAME_OVERHEAD
 728 bl .unrecoverable_exception
 729 b unrecov_restore
730
 731#ifdef CONFIG_PPC_RTAS
 732/*
 733 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 734 * called with the MMU off.
 735 *
 736 * In addition, we need to be in 32b mode, at least for now.
 737 *
 738 * Note: r3 is an input parameter to rtas, so don't trash it...
 739 */
/* Saves everything RTAS may clobber (TOC, paca, non-volatiles, CR/CTR/
 * XER/DAR/DSISR), stashes SP and MSR in the PACA, then enters RTAS in
 * real mode (MMU off) via rfid with a crafted MSR in SRR1. */
 740_GLOBAL(enter_rtas)
 741 mflr r0
 742 std r0,16(r1)
 743 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
 744
 745 /* Because RTAS is running in 32b mode, it clobbers the high order half
 746 * of all registers that it saves. We therefore save those registers
 747 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
 748 */
 749 SAVE_GPR(2, r1) /* Save the TOC */
 750 SAVE_GPR(13, r1) /* Save paca */
 751 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
 752 SAVE_10GPRS(22, r1) /* ditto */
 753
 754 mfcr r4
 755 std r4,_CCR(r1)
 756 mfctr r5
 757 std r5,_CTR(r1)
 758 mfspr r6,SPRN_XER
 759 std r6,_XER(r1)
 760 mfdar r7
 761 std r7,_DAR(r1)
 762 mfdsisr r8
 763 std r8,_DSISR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000764
Mike Kravetz9fe901d2006-03-27 15:20:00 -0800765 /* Temporary workaround to clear CR until RTAS can be modified to
 766 * ignore all bits.
 767 */
 768 li r0,0
 769 mtcr r0
 770
David Woodhouse007d88d2007-01-01 18:45:34 +0000771#ifdef CONFIG_BUG
 772 /* There is no way it is acceptable to get here with interrupts enabled,
 773 * check it with the asm equivalent of WARN_ON
 774 */
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000775 lbz r0,PACASOFTIRQEN(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +10007761: tdnei r0,0
David Woodhouse007d88d2007-01-01 18:45:34 +0000777 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
 778#endif
 779
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000780 /* Hard-disable interrupts */
 781 mfmsr r6
 782 rldicl r7,r6,48,1
 783 rotldi r7,r7,16
 784 mtmsrd r7,1
 785
Paul Mackerras9994a332005-10-10 22:36:14 +1000786 /* Unfortunately, the stack pointer and the MSR are also clobbered,
 787 * so they are saved in the PACA which allows us to restore
 788 * our original state after RTAS returns.
 789 */
 790 std r1,PACAR1(r13)
 791 std r6,PACASAVEDMSR(r13)
 792
 793 /* Setup our real return addr */
David Gibsone58c3492006-01-13 14:56:25 +1100794 LOAD_REG_ADDR(r4,.rtas_return_loc)
 795 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000796 mtlr r4
 797
/* r0 = current MSR with EE/SE/BE/RI cleared: the mode we drop to now. */
 798 li r0,0
 799 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
 800 andc r0,r6,r0
 801
/* r6 = MSR for RTAS itself: additionally clear SF (64-bit), IR/DR
 * (MMU translation) and the FP/FE bits. */
 802 li r9,1
 803 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
Anton Blanchard44c9f3c2010-02-07 19:37:29 +0000804 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
Paul Mackerras9994a332005-10-10 22:36:14 +1000805 andc r6,r0,r9
Paul Mackerras9994a332005-10-10 22:36:14 +1000806 sync /* disable interrupts so SRR0/1 */
 807 mtmsrd r0 /* don't get trashed */
 808
David Gibsone58c3492006-01-13 14:56:25 +1100809 LOAD_REG_ADDR(r4, rtas)
Paul Mackerras9994a332005-10-10 22:36:14 +1000810 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
 811 ld r4,RTASBASE(r4) /* get the rtas->base value */
 812
 813 mtspr SPRN_SRR0,r5
 814 mtspr SPRN_SRR1,r6
 815 rfid
 816 b . /* prevent speculative execution */
 817
/* RTAS returns here, still in real mode with relocation off: rebuild a
 * recoverable MSR, restore SP/MSR from the PACA, and rfid to
 * rtas_restore_regs with translation back on. */
 818_STATIC(rtas_return_loc)
 819 /* relocation is off at this point */
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100820 GET_PACA(r4)
David Gibsone58c3492006-01-13 14:56:25 +1100821 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000822
/* bcl 20,31,$+4 is the position-independent "get PC into LR" idiom. */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000823 bcl 20,31,$+4
 8240: mflr r3
 825 ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */
 826
Paul Mackerras9994a332005-10-10 22:36:14 +1000827 mfmsr r6
 828 li r0,MSR_RI
 829 andc r6,r6,r0
 830 sync
 831 mtmsrd r6
 832
 833 ld r1,PACAR1(r4) /* Restore our SP */
Paul Mackerras9994a332005-10-10 22:36:14 +1000834 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
 835
 836 mtspr SPRN_SRR0,r3
 837 mtspr SPRN_SRR1,r4
 838 rfid
 839 b . /* prevent speculative execution */
 840
Paul Mackerrase31aa452008-08-30 11:41:12 +1000841 .align 3
 8421: .llong .rtas_restore_regs
 843
Paul Mackerras9994a332005-10-10 22:36:14 +1000844_STATIC(rtas_restore_regs)
 845 /* relocation is on at this point */
 846 REST_GPR(2, r1) /* Restore the TOC */
 847 REST_GPR(13, r1) /* Restore paca */
 848 REST_8GPRS(14, r1) /* Restore the non-volatiles */
 849 REST_10GPRS(22, r1) /* ditto */
 850
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100851 GET_PACA(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +1000852
 853 ld r4,_CCR(r1)
 854 mtcr r4
 855 ld r5,_CTR(r1)
 856 mtctr r5
 857 ld r6,_XER(r1)
 858 mtspr SPRN_XER,r6
 859 ld r7,_DAR(r1)
 860 mtdar r7
 861 ld r8,_DSISR(r1)
 862 mtdsisr r8
Paul Mackerras9994a332005-10-10 22:36:14 +1000863
 864 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
 865 ld r0,16(r1) /* get return address */
 866
 867 mtlr r0
 868 blr /* return to caller */
 869
 870#endif /* CONFIG_PPC_RTAS */
871
/*
 * enter_prom — call into Open Firmware (the PROM client interface).
 *   r3 = passed through untouched to OF (presumably the prom_args
 *        argument block — confirm against the C prototype of enter_prom)
 *   r4 = OF entry point; moved to LR and invoked with blrl
 * OF runs in 32-bit mode and clobbers the high halves of registers, so
 * everything it might touch (r2, r13-r31, CR, MSR) is saved to a
 * PROM_FRAME_SIZE stack frame first.  MSR is switched to 32-bit mode
 * for the call and restored (with the top half of r1 re-zero-extended)
 * afterwards.  Clobbers: r0, r10-r12, CTR/LR per usual C ABI.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000872_GLOBAL(enter_prom)
 873 mflr r0
 874 std r0,16(r1)
 875 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
 876
 877 /* Because PROM is running in 32b mode, it clobbers the high order half
 878 * of all registers that it saves. We therefore save those registers
 879 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
 880 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000881 SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000882 SAVE_GPR(13, r1)
 883 SAVE_8GPRS(14, r1)
 884 SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000885 mfcr r10
Paul Mackerras9994a332005-10-10 22:36:14 +1000886 mfmsr r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000887 std r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000888 std r11,_MSR(r1)
 889
 890 /* Get the PROM entrypoint */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000891 mtlr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000892
 893 /* Switch MSR to 32 bits mode
 894 */
 /*
  * Book3E: clear MSR bit 0 (the computation-mode bit) with a rotate-
  * and-mask; Book3S: clear MSR_SF and MSR_ISF individually via
  * rldicr-built masks, then mtmsrd.  Either way OF sees a 32-bit MSR.
  */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000895#ifdef CONFIG_PPC_BOOK3E
 896 rlwinm r11,r11,0,1,31
 897 mtmsr r11
 898#else /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000899 mfmsr r11
 900 li r12,1
 901 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
 902 andc r11,r11,r12
 903 li r12,1
 904 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
 905 andc r11,r11,r12
 906 mtmsrd r11
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000907#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000908 isync
 909
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000910 /* Enter PROM here... */
Paul Mackerras9994a332005-10-10 22:36:14 +1000911 blrl
 912
 913 /* Just make sure that r1 top 32 bits didn't get
 914 * corrupt by OF
 915 */
 916 rldicl r1,r1,0,32
 917
 918 /* Restore the MSR (back to 64 bits) */
 919 ld r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000920 MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +1000921 isync
 922
 923 /* Restore other registers */
 924 REST_GPR(2, r1)
 925 REST_GPR(13, r1)
 926 REST_8GPRS(14, r1)
 927 REST_10GPRS(22, r1)
 928 ld r4,_CCR(r1)
 929 mtcr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000930
 /* Pop the PROM frame and return; 16(r1) is the LR save slot. */
 931 addi r1,r1,PROM_FRAME_SIZE
 932 ld r0,16(r1)
 933 mtlr r0
 934 blr
Steven Rostedt4e491d12008-05-14 23:49:44 -0400935
Steven Rostedt606576c2008-10-06 19:06:12 -0400936#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -0400937#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * With CONFIG_DYNAMIC_FTRACE the compiler-inserted mcount call sites are
 * patched at runtime (to nops, or to ftrace_caller when tracing is on),
 * so the default mcount/_mcount entry is simply a return.
 */
938_GLOBAL(mcount)
939_GLOBAL(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -0400940 blr
941
/*
 * ftrace_caller: the dynamic-ftrace trampoline patched in at mcount
 * call sites.  On entry LR holds the address just after the mcount call
 * in the traced function.  It builds a 112-byte frame, stashes LR in
 * the caller frame's LR slot at 128(r1), and sets up the tracer
 * arguments: r3 = traced function address (LR - MCOUNT_INSN_SIZE),
 * r4 = parent return address, loaded from the previous frame's LR save
 * slot (16(r11), r11 = saved back-chain).
 *
 * ftrace_call / ftrace_graph_call are runtime-patched call sites: the
 * "bl ftrace_stub" / "b ftrace_graph_stub" instructions are rewritten
 * by the ftrace core to call the active tracer.  Execution falls
 * through into ftrace_stub's blr when nothing is patched in.
 */
942_GLOBAL(ftrace_caller)
 943 /* Taken from output of objdump from lib64/glibc */
 944 mflr r3
 945 ld r11, 0(r1)
 946 stdu r1, -112(r1)
 947 std r3, 128(r1)
 948 ld r4, 16(r11)
Abhishek Sagar395a59d2008-06-21 23:47:27 +0530949 subi r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -0400950.globl ftrace_call
951ftrace_call:
 952 bl ftrace_stub
 953 nop
Steven Rostedt46542882009-02-10 22:19:54 -0800954#ifdef CONFIG_FUNCTION_GRAPH_TRACER
955.globl ftrace_graph_call
956ftrace_graph_call:
 957 b ftrace_graph_stub
958_GLOBAL(ftrace_graph_stub)
 959#endif
 /* Tear down the trampoline frame and return to the traced function. */
Steven Rostedt4e491d12008-05-14 23:49:44 -0400960 ld r0, 128(r1)
 961 mtlr r0
 962 addi r1, r1, 112
963_GLOBAL(ftrace_stub)
 964 blr
965#else
/* Non-dynamic ftrace: mcount is a no-op; the tracing work is done by
 * _mcount below. */
966_GLOBAL(mcount)
 967 blr
968
/*
 * _mcount (non-dynamic ftrace): called at the start of every traced
 * function.  Same frame/argument setup as ftrace_caller — r3 = traced
 * function address, r4 = parent return address — but it dispatches
 * through the ftrace_trace_function pointer: the first ld fetches the
 * current tracer pointer, the second loads the entry address from its
 * function descriptor (ppc64 ELFv1 ABI — presumably; the double
 * dereference matches that convention), then calls it via CTR.
 */
969_GLOBAL(_mcount)
 970 /* Taken from output of objdump from lib64/glibc */
 971 mflr r3
 972 ld r11, 0(r1)
 973 stdu r1, -112(r1)
 974 std r3, 128(r1)
 975 ld r4, 16(r11)
 976
Abhishek Sagar395a59d2008-06-21 23:47:27 +0530977 subi r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -0400978 LOAD_REG_ADDR(r5,ftrace_trace_function)
 979 ld r5,0(r5)
 980 ld r5,0(r5)
 981 mtctr r5
 982 bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -0400983 nop
Steven Rostedt6794c782009-02-09 21:10:27 -0800984
 985
 /* Graph tracer, when built in, takes over the return path from here
  * (ftrace_graph_caller expects this exact frame layout). */
 986#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 987 b ftrace_graph_caller
 988#endif
 /* Tear down the frame and return to the traced function. */
Steven Rostedt4e491d12008-05-14 23:49:44 -0400989 ld r0, 128(r1)
 990 mtlr r0
 991 addi r1, r1, 112
992_GLOBAL(ftrace_stub)
 993 blr
994
Steven Rostedt6794c782009-02-09 21:10:27 -0800995#endif /* CONFIG_DYNAMIC_FTRACE */
996
997#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * ftrace_graph_caller: entered from the mcount trampolines with their
 * 112-byte frame still live (saved LR at 128(r1)).  Passes to
 * .prepare_ftrace_return:
 *   r3 = address of the parent's LR save slot (back-chain + 16)
 *   r4 = traced function address (saved LR - MCOUNT_INSN_SIZE)
 * NOTE(review): .prepare_ftrace_return presumably rewrites that LR slot
 * so the traced function "returns" into return_to_handler — confirm
 * against the C implementation.
 */
Steven Rostedt46542882009-02-10 22:19:54 -0800998_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -0800999 /* load r4 with local address */
 1000 ld r4, 128(r1)
 1001 subi r4, r4, MCOUNT_INSN_SIZE
 1002
 1003 /* get the parent address */
 1004 ld r11, 112(r1)
 1005 addi r3, r11, 16
 1006
 1007 bl .prepare_ftrace_return
 1008 nop
 1009
 /* Pop the mcount frame and resume the traced function. */
 1010 ld r0, 128(r1)
 1011 mtlr r0
 1012 addi r1, r1, 112
 1013 blr
1014
/*
 * return_to_handler: functions hooked by the graph tracer return HERE
 * instead of to their real caller.  The potential two-register return
 * value (r3/r4) and r31 are saved relative to r1 before stdu creates a
 * working frame (r31 keeps the old r1 so the slots stay reachable).
 * .ftrace_return_to_handler yields the original return address in r3,
 * which is moved to LR; the saved return value is reloaded and we jump
 * to the real caller.  Core-kernel variant: r2 (TOC) is already valid,
 * unlike mod_return_to_handler below.
 */
1015_GLOBAL(return_to_handler)
 1016 /* need to save return values */
Steven Rostedtbb725342009-02-11 12:45:49 -08001017 std r4, -24(r1)
 1018 std r3, -16(r1)
 1019 std r31, -8(r1)
 1020 mr r31, r1
 1021 stdu r1, -112(r1)
 1022
 1023 bl .ftrace_return_to_handler
 1024 nop
 1025
 1026 /* return value has real return address */
 1027 mtlr r3
 1028
 /* Unwind via the back chain at 0(r1), then reload the saved slots. */
 1029 ld r1, 0(r1)
 1030 ld r4, -24(r1)
 1031 ld r3, -16(r1)
 1032 ld r31, -8(r1)
 1033
 1034 /* Jump back to real return address */
 1035 blr
1036
/*
 * mod_return_to_handler: like return_to_handler, but used for traced
 * functions in modules.  It must additionally preserve the module's TOC
 * (r2) and switch to the core kernel's TOC (from the PACA) so that
 * .ftrace_return_to_handler can execute; the module TOC is restored
 * before branching back to the real return address.
 */
1037_GLOBAL(mod_return_to_handler)
 1038 /* need to save return values */
Steven Rostedt6794c782009-02-09 21:10:27 -08001039 std r4, -32(r1)
 1040 std r3, -24(r1)
 1041 /* save TOC */
 1042 std r2, -16(r1)
 1043 std r31, -8(r1)
 1044 mr r31, r1
 1045 stdu r1, -112(r1)
 1046
Steven Rostedtbb725342009-02-11 12:45:49 -08001047 /*
 1048 * We are in a module using the module's TOC.
 1049 * Switch to our TOC to run inside the core kernel.
 1050 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001051 ld r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001052
 1053 bl .ftrace_return_to_handler
 1054 nop
 1055
 1056 /* return value has real return address */
 1057 mtlr r3
 1058
 /* Unwind via the back chain, restoring return value, TOC and r31. */
 1059 ld r1, 0(r1)
 1060 ld r4, -32(r1)
 1061 ld r3, -24(r1)
 1062 ld r2, -16(r1)
 1063 ld r31, -8(r1)
 1064
 1065 /* Jump back to real return address */
 1066 blr
1067#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1068#endif /* CONFIG_FUNCTION_TRACER */