blob: f8a7a1a1a9f4480ac1881f907da771a474606998 [file] [log] [blame]
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>

37/*
38 * System calls.
39 */
40 .section ".toc","aw"
41.SYS_CALL_TABLE:
42 .tc .sys_call_table[TC],.sys_call_table
43
44/* This value is used to mark exception frames on the stack. */
45exception_marker:
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100046 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
Paul Mackerras9994a332005-10-10 22:36:14 +100047
48 .section ".text"
49 .align 7
50
51#undef SHOW_SYSCALLS
52
53 .globl system_call_common
54system_call_common:
55 andi. r10,r12,MSR_PR
56 mr r10,r1
57 addi r1,r1,-INT_FRAME_SIZE
58 beq- 1f
59 ld r1,PACAKSAVE(r13)
601: std r10,0(r1)
61 std r11,_NIP(r1)
62 std r12,_MSR(r1)
63 std r0,GPR0(r1)
64 std r10,GPR1(r1)
Paul Mackerrasc6622f62006-02-24 10:06:59 +110065 ACCOUNT_CPU_USER_ENTRY(r10, r11)
Paul Mackerrasab598b62008-11-30 11:49:45 +000066 /*
67 * This "crclr so" clears CR0.SO, which is the error indication on
68 * return from this system call. There must be no cmp instruction
69 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
70 * CR0.SO will get set, causing all system calls to appear to fail.
71 */
72 crclr so
Paul Mackerras9994a332005-10-10 22:36:14 +100073 std r2,GPR2(r1)
74 std r3,GPR3(r1)
75 std r4,GPR4(r1)
76 std r5,GPR5(r1)
77 std r6,GPR6(r1)
78 std r7,GPR7(r1)
79 std r8,GPR8(r1)
80 li r11,0
81 std r11,GPR9(r1)
82 std r11,GPR10(r1)
83 std r11,GPR11(r1)
84 std r11,GPR12(r1)
85 std r9,GPR13(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100086 mfcr r9
87 mflr r10
88 li r11,0xc01
89 std r9,_CCR(r1)
90 std r10,_LINK(r1)
91 std r11,_TRAP(r1)
92 mfxer r9
93 mfctr r10
94 std r9,_XER(r1)
95 std r10,_CTR(r1)
96 std r3,ORIG_GPR3(r1)
97 ld r2,PACATOC(r13)
98 addi r9,r1,STACK_FRAME_OVERHEAD
99 ld r11,exception_marker@toc(r2)
100 std r11,-16(r9) /* "regshere" marker */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000101#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
102BEGIN_FW_FTR_SECTION
103 beq 33f
104 /* if from user, see if there are any DTL entries to process */
105 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
106 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
107 ld r10,LPPACA_DTLIDX(r10) /* get log write index */
108 cmpd cr1,r11,r10
109 beq+ cr1,33f
110 bl .accumulate_stolen_time
111 REST_GPR(0,r1)
112 REST_4GPRS(3,r1)
113 REST_2GPRS(7,r1)
114 addi r9,r1,STACK_FRAME_OVERHEAD
11533:
116END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
117#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
118
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100119 /*
120 * A syscall should always be called with interrupts enabled
121 * so we just unconditionally hard-enable here. When some kind
122 * of irq tracing is used, we additionally check that condition
123 * is correct
124 */
125#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
126 lbz r10,PACASOFTIRQEN(r13)
127 xori r10,r10,1
1281: tdnei r10,0
129 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
130#endif
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000131
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000132#ifdef CONFIG_PPC_BOOK3E
133 wrteei 1
134#else
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100135 ld r11,PACAKMSR(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +1000136 ori r11,r11,MSR_EE
137 mtmsrd r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000138#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000139
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100140 /* We do need to set SOFTE in the stack frame or the return
141 * from interrupt will be painful
142 */
143 li r10,1
144 std r10,SOFTE(r1)
145
Paul Mackerras9994a332005-10-10 22:36:14 +1000146#ifdef SHOW_SYSCALLS
147 bl .do_show_syscall
148 REST_GPR(0,r1)
149 REST_4GPRS(3,r1)
150 REST_2GPRS(7,r1)
151 addi r9,r1,STACK_FRAME_OVERHEAD
152#endif
153 clrrdi r11,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000154 ld r10,TI_FLAGS(r11)
Paul Mackerras9994a332005-10-10 22:36:14 +1000155 andi. r11,r10,_TIF_SYSCALL_T_OR_A
156 bne- syscall_dotrace
157syscall_dotrace_cont:
158 cmpldi 0,r0,NR_syscalls
159 bge- syscall_enosys
160
161system_call: /* label this so stack traces look sane */
162/*
163 * Need to vector to 32 Bit or default sys_call_table here,
164 * based on caller's run-mode / personality.
165 */
166 ld r11,.SYS_CALL_TABLE@toc(2)
167 andi. r10,r10,_TIF_32BIT
168 beq 15f
169 addi r11,r11,8 /* use 32-bit syscall entries */
170 clrldi r3,r3,32
171 clrldi r4,r4,32
172 clrldi r5,r5,32
173 clrldi r6,r6,32
174 clrldi r7,r7,32
175 clrldi r8,r8,32
17615:
177 slwi r0,r0,4
178 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
179 mtctr r10
180 bctrl /* Call handler */
181
182syscall_exit:
Paul Mackerras9994a332005-10-10 22:36:14 +1000183 std r3,RESULT(r1)
David Woodhouse401d1f02005-11-15 18:52:18 +0000184#ifdef SHOW_SYSCALLS
185 bl .do_show_syscall_exit
186 ld r3,RESULT(r1)
187#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000188 clrrdi r12,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000189
Paul Mackerras9994a332005-10-10 22:36:14 +1000190 ld r8,_MSR(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000191#ifdef CONFIG_PPC_BOOK3S
192 /* No MSR:RI on BookE */
Paul Mackerras9994a332005-10-10 22:36:14 +1000193 andi. r10,r8,MSR_RI
194 beq- unrecov_restore
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000195#endif
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100196 /*
197 * Disable interrupts so current_thread_info()->flags can't change,
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000198 * and so that we don't get interrupted after loading SRR0/1.
199 */
200#ifdef CONFIG_PPC_BOOK3E
201 wrteei 0
202#else
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100203 ld r10,PACAKMSR(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +1000204 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000205#endif /* CONFIG_PPC_BOOK3E */
206
Paul Mackerras9994a332005-10-10 22:36:14 +1000207 ld r9,TI_FLAGS(r12)
David Woodhouse401d1f02005-11-15 18:52:18 +0000208 li r11,-_LAST_ERRNO
Paul Mackerras1bd79332006-03-08 13:24:22 +1100209 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
Paul Mackerras9994a332005-10-10 22:36:14 +1000210 bne- syscall_exit_work
David Woodhouse401d1f02005-11-15 18:52:18 +0000211 cmpld r3,r11
212 ld r5,_CCR(r1)
213 bge- syscall_error
214syscall_error_cont:
Paul Mackerras9994a332005-10-10 22:36:14 +1000215 ld r7,_NIP(r1)
Anton Blanchardf89451f2010-08-11 01:40:27 +0000216BEGIN_FTR_SECTION
Paul Mackerras9994a332005-10-10 22:36:14 +1000217 stdcx. r0,0,r1 /* to clear the reservation */
Anton Blanchardf89451f2010-08-11 01:40:27 +0000218END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
Paul Mackerras9994a332005-10-10 22:36:14 +1000219 andi. r6,r8,MSR_PR
220 ld r4,_LINK(r1)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100221 /*
222 * Clear RI before restoring r13. If we are returning to
223 * userspace and we take an exception after restoring r13,
224 * we end up corrupting the userspace r13 value.
225 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000226#ifdef CONFIG_PPC_BOOK3S
227 /* No MSR:RI on BookE */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100228 li r12,MSR_RI
229 andc r11,r10,r12
230 mtmsrd r11,1 /* clear MSR.RI */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000231#endif /* CONFIG_PPC_BOOK3S */
232
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100233 beq- 1f
234 ACCOUNT_CPU_USER_EXIT(r11, r12)
235 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
Paul Mackerras9994a332005-10-10 22:36:14 +10002361: ld r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000237 ld r1,GPR1(r1)
238 mtlr r4
239 mtcr r5
240 mtspr SPRN_SRR0,r7
241 mtspr SPRN_SRR1,r8
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000242 RFI
Paul Mackerras9994a332005-10-10 22:36:14 +1000243 b . /* prevent speculative execution */
244
David Woodhouse401d1f02005-11-15 18:52:18 +0000245syscall_error:
Paul Mackerras9994a332005-10-10 22:36:14 +1000246 oris r5,r5,0x1000 /* Set SO bit in CR */
David Woodhouse401d1f02005-11-15 18:52:18 +0000247 neg r3,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000248 std r5,_CCR(r1)
249 b syscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000250
Paul Mackerras9994a332005-10-10 22:36:14 +1000251/* Traced system call support */
252syscall_dotrace:
253 bl .save_nvgprs
254 addi r3,r1,STACK_FRAME_OVERHEAD
255 bl .do_syscall_trace_enter
Roland McGrath4f72c422008-07-27 16:51:03 +1000256 /*
257 * Restore argument registers possibly just changed.
258 * We use the return value of do_syscall_trace_enter
259 * for the call number to look up in the table (r0).
260 */
261 mr r0,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000262 ld r3,GPR3(r1)
263 ld r4,GPR4(r1)
264 ld r5,GPR5(r1)
265 ld r6,GPR6(r1)
266 ld r7,GPR7(r1)
267 ld r8,GPR8(r1)
268 addi r9,r1,STACK_FRAME_OVERHEAD
269 clrrdi r10,r1,THREAD_SHIFT
270 ld r10,TI_FLAGS(r10)
271 b syscall_dotrace_cont
272
David Woodhouse401d1f02005-11-15 18:52:18 +0000273syscall_enosys:
274 li r3,-ENOSYS
275 b syscall_exit
276
277syscall_exit_work:
278 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
279 If TIF_NOERROR is set, just save r3 as it is. */
280
281 andi. r0,r9,_TIF_RESTOREALL
Paul Mackerras1bd79332006-03-08 13:24:22 +1100282 beq+ 0f
283 REST_NVGPRS(r1)
284 b 2f
2850: cmpld r3,r11 /* r10 is -LAST_ERRNO */
David Woodhouse401d1f02005-11-15 18:52:18 +0000286 blt+ 1f
287 andi. r0,r9,_TIF_NOERROR
288 bne- 1f
289 ld r5,_CCR(r1)
290 neg r3,r3
291 oris r5,r5,0x1000 /* Set SO bit in CR */
292 std r5,_CCR(r1)
2931: std r3,GPR3(r1)
2942: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
295 beq 4f
296
Paul Mackerras1bd79332006-03-08 13:24:22 +1100297 /* Clear per-syscall TIF flags if any are set. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000298
299 li r11,_TIF_PERSYSCALL_MASK
300 addi r12,r12,TI_FLAGS
3013: ldarx r10,0,r12
302 andc r10,r10,r11
303 stdcx. r10,0,r12
304 bne- 3b
305 subi r12,r12,TI_FLAGS
Paul Mackerras1bd79332006-03-08 13:24:22 +1100306
3074: /* Anything else left to do? */
308 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
David Woodhouse401d1f02005-11-15 18:52:18 +0000309 beq .ret_from_except_lite
310
311 /* Re-enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000312#ifdef CONFIG_PPC_BOOK3E
313 wrteei 1
314#else
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100315 ld r10,PACAKMSR(r13)
David Woodhouse401d1f02005-11-15 18:52:18 +0000316 ori r10,r10,MSR_EE
317 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000318#endif /* CONFIG_PPC_BOOK3E */
David Woodhouse401d1f02005-11-15 18:52:18 +0000319
Paul Mackerras1bd79332006-03-08 13:24:22 +1100320 bl .save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000321 addi r3,r1,STACK_FRAME_OVERHEAD
322 bl .do_syscall_trace_leave
Paul Mackerras1bd79332006-03-08 13:24:22 +1100323 b .ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000324
325/* Save non-volatile GPRs, if not already saved. */
326_GLOBAL(save_nvgprs)
327 ld r11,_TRAP(r1)
328 andi. r0,r11,1
329 beqlr-
330 SAVE_NVGPRS(r1)
331 clrrdi r0,r11,1
332 std r0,_TRAP(r1)
333 blr
334
David Woodhouse401d1f02005-11-15 18:52:18 +0000335
Paul Mackerras9994a332005-10-10 22:36:14 +1000336/*
337 * The sigsuspend and rt_sigsuspend system calls can call do_signal
338 * and thus put the process into the stopped state where we might
339 * want to examine its user state with ptrace. Therefore we need
340 * to save all the nonvolatile registers (r14 - r31) before calling
341 * the C code. Similarly, fork, vfork and clone need the full
342 * register state on the stack so that it can be copied to the child.
343 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000344
345_GLOBAL(ppc_fork)
346 bl .save_nvgprs
347 bl .sys_fork
348 b syscall_exit
349
350_GLOBAL(ppc_vfork)
351 bl .save_nvgprs
352 bl .sys_vfork
353 b syscall_exit
354
355_GLOBAL(ppc_clone)
356 bl .save_nvgprs
357 bl .sys_clone
358 b syscall_exit
359
Paul Mackerras1bd79332006-03-08 13:24:22 +1100360_GLOBAL(ppc32_swapcontext)
361 bl .save_nvgprs
362 bl .compat_sys_swapcontext
363 b syscall_exit
364
365_GLOBAL(ppc64_swapcontext)
366 bl .save_nvgprs
367 bl .sys_swapcontext
368 b syscall_exit
369
Paul Mackerras9994a332005-10-10 22:36:14 +1000370_GLOBAL(ret_from_fork)
371 bl .schedule_tail
372 REST_NVGPRS(r1)
373 li r3,0
374 b syscall_exit
375
376/*
377 * This routine switches between two different tasks. The process
378 * state of one is saved on its kernel stack. Then the state
379 * of the other is restored from its kernel stack. The memory
380 * management hardware is updated to the second process's state.
381 * Finally, we can return to the second process, via ret_from_except.
382 * On entry, r3 points to the THREAD for the current task, r4
383 * points to the THREAD for the new task.
384 *
385 * Note: there are two ways to get to the "going out" portion
386 * of this code; either by coming in via the entry (_switch)
387 * or via "fork" which must set up an environment equivalent
388 * to the "_switch" path. If you change this you'll have to change
389 * the fork code also.
390 *
391 * The code which creates the new task context is in 'copy_thread'
Jon Mason2ef94812006-01-23 10:58:20 -0600392 * in arch/powerpc/kernel/process.c
Paul Mackerras9994a332005-10-10 22:36:14 +1000393 */
394 .align 7
395_GLOBAL(_switch)
396 mflr r0
397 std r0,16(r1)
398 stdu r1,-SWITCH_FRAME_SIZE(r1)
399 /* r3-r13 are caller saved -- Cort */
400 SAVE_8GPRS(14, r1)
401 SAVE_10GPRS(22, r1)
402 mflr r20 /* Return to switch caller */
403 mfmsr r22
404 li r0, MSR_FP
Michael Neulingce48b212008-06-25 14:07:18 +1000405#ifdef CONFIG_VSX
406BEGIN_FTR_SECTION
407 oris r0,r0,MSR_VSX@h /* Disable VSX */
408END_FTR_SECTION_IFSET(CPU_FTR_VSX)
409#endif /* CONFIG_VSX */
Paul Mackerras9994a332005-10-10 22:36:14 +1000410#ifdef CONFIG_ALTIVEC
411BEGIN_FTR_SECTION
412 oris r0,r0,MSR_VEC@h /* Disable altivec */
413 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
414 std r24,THREAD_VRSAVE(r3)
415END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
416#endif /* CONFIG_ALTIVEC */
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000417#ifdef CONFIG_PPC64
418BEGIN_FTR_SECTION
419 mfspr r25,SPRN_DSCR
420 std r25,THREAD_DSCR(r3)
421END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
422#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000423 and. r0,r0,r22
424 beq+ 1f
425 andc r22,r22,r0
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000426 MTMSRD(r22)
Paul Mackerras9994a332005-10-10 22:36:14 +1000427 isync
4281: std r20,_NIP(r1)
429 mfcr r23
430 std r23,_CCR(r1)
431 std r1,KSP(r3) /* Set old stack pointer */
432
433#ifdef CONFIG_SMP
434 /* We need a sync somewhere here to make sure that if the
435 * previous task gets rescheduled on another CPU, it sees all
436 * stores it has performed on this one.
437 */
438 sync
439#endif /* CONFIG_SMP */
440
Anton Blanchardf89451f2010-08-11 01:40:27 +0000441 /*
442 * If we optimise away the clear of the reservation in system
443 * calls because we know the CPU tracks the address of the
444 * reservation, then we need to clear it here to cover the
445 * case that the kernel context switch path has no larx
446 * instructions.
447 */
448BEGIN_FTR_SECTION
449 ldarx r6,0,r1
450END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
451
Paul Mackerras9994a332005-10-10 22:36:14 +1000452 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
453 std r6,PACACURRENT(r13) /* Set new 'current' */
454
455 ld r8,KSP(r4) /* new stack pointer */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000456#ifdef CONFIG_PPC_BOOK3S
Paul Mackerras9994a332005-10-10 22:36:14 +1000457BEGIN_FTR_SECTION
Michael Ellermanc2303282008-06-24 11:33:05 +1000458 BEGIN_FTR_SECTION_NESTED(95)
Paul Mackerras9994a332005-10-10 22:36:14 +1000459 clrrdi r6,r8,28 /* get its ESID */
460 clrrdi r9,r1,28 /* get current sp ESID */
Michael Ellermanc2303282008-06-24 11:33:05 +1000461 FTR_SECTION_ELSE_NESTED(95)
Paul Mackerras1189be62007-10-11 20:37:10 +1000462 clrrdi r6,r8,40 /* get its 1T ESID */
463 clrrdi r9,r1,40 /* get current sp 1T ESID */
Matt Evans44ae3ab2011-04-06 19:48:50 +0000464 ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
Michael Ellermanc2303282008-06-24 11:33:05 +1000465FTR_SECTION_ELSE
466 b 2f
Matt Evans44ae3ab2011-04-06 19:48:50 +0000467ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
Paul Mackerras9994a332005-10-10 22:36:14 +1000468 clrldi. r0,r6,2 /* is new ESID c00000000? */
469 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
470 cror eq,4*cr1+eq,eq
471 beq 2f /* if yes, don't slbie it */
472
473 /* Bolt in the new stack SLB entry */
474 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
475 oris r0,r6,(SLB_ESID_V)@h
476 ori r0,r0,(SLB_NUM_BOLTED-1)@l
Paul Mackerras1189be62007-10-11 20:37:10 +1000477BEGIN_FTR_SECTION
478 li r9,MMU_SEGSIZE_1T /* insert B field */
479 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
480 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
Matt Evans44ae3ab2011-04-06 19:48:50 +0000481END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
Michael Neuling2f6093c2006-08-07 16:19:19 +1000482
Michael Neuling00efee72007-08-24 16:58:37 +1000483 /* Update the last bolted SLB. No write barriers are needed
484 * here, provided we only update the current CPU's SLB shadow
485 * buffer.
486 */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000487 ld r9,PACA_SLBSHADOWPTR(r13)
Michael Neuling11a27ad2006-08-09 17:00:30 +1000488 li r12,0
489 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
490 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
491 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000492
Matt Evans44ae3ab2011-04-06 19:48:50 +0000493 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
Olof Johanssonf66bce52007-10-16 00:58:59 +1000494 * we have 1TB segments, the only CPUs known to have the errata
495 * only support less than 1TB of system memory and we'll never
496 * actually hit this code path.
497 */
498
Paul Mackerras9994a332005-10-10 22:36:14 +1000499 slbie r6
500 slbie r6 /* Workaround POWER5 < DD2.1 issue */
501 slbmte r7,r0
502 isync
Paul Mackerras9994a332005-10-10 22:36:14 +10005032:
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000504#endif /* !CONFIG_PPC_BOOK3S */
505
Paul Mackerras9994a332005-10-10 22:36:14 +1000506 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
507 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
508 because we don't need to leave the 288-byte ABI gap at the
509 top of the kernel stack. */
510 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
511
512 mr r1,r8 /* start using new stack pointer */
513 std r7,PACAKSAVE(r13)
514
515 ld r6,_CCR(r1)
516 mtcrf 0xFF,r6
517
518#ifdef CONFIG_ALTIVEC
519BEGIN_FTR_SECTION
520 ld r0,THREAD_VRSAVE(r4)
521 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
522END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
523#endif /* CONFIG_ALTIVEC */
Alexey Kardashevskiyefcac652011-03-02 15:18:48 +0000524#ifdef CONFIG_PPC64
525BEGIN_FTR_SECTION
526 ld r0,THREAD_DSCR(r4)
527 cmpd r0,r25
528 beq 1f
529 mtspr SPRN_DSCR,r0
5301:
531END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
532#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000533
534 /* r3-r13 are destroyed -- Cort */
535 REST_8GPRS(14, r1)
536 REST_10GPRS(22, r1)
537
538 /* convert old thread to its task_struct for return value */
539 addi r3,r3,-THREAD
540 ld r7,_NIP(r1) /* Return to _switch caller in new task */
541 mtlr r7
542 addi r1,r1,SWITCH_FRAME_SIZE
543 blr
544
545 .align 7
546_GLOBAL(ret_from_except)
547 ld r11,_TRAP(r1)
548 andi. r0,r11,1
549 bne .ret_from_except_lite
550 REST_NVGPRS(r1)
551
552_GLOBAL(ret_from_except_lite)
553 /*
554 * Disable interrupts so that current_thread_info()->flags
555 * can't change between when we test it and when we return
556 * from the interrupt.
557 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000558#ifdef CONFIG_PPC_BOOK3E
559 wrteei 0
560#else
Benjamin Herrenschmidtd9ada912012-03-02 11:33:52 +1100561 ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
562 mtmsrd r10,1 /* Update machine state */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000563#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000564
565#ifdef CONFIG_PREEMPT
566 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
567 li r0,_TIF_NEED_RESCHED /* bits to check */
568 ld r3,_MSR(r1)
569 ld r4,TI_FLAGS(r9)
570 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
571 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
572 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
573 bne do_work
574
575#else /* !CONFIG_PREEMPT */
576 ld r3,_MSR(r1) /* Returning to user mode? */
577 andi. r3,r3,MSR_PR
578 beq restore /* if not, just restore regs and return */
579
580 /* Check current_thread_info()->flags */
581 clrrdi r9,r1,THREAD_SHIFT
582 ld r4,TI_FLAGS(r9)
583 andi. r0,r4,_TIF_USER_WORK_MASK
584 bne do_work
Benjamin Herrenschmidt4f8cf362012-02-28 13:44:58 +1100585#endif /* !CONFIG_PREEMPT */
Paul Mackerras9994a332005-10-10 22:36:14 +1000586
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100587 .globl fast_exc_return_irq
588fast_exc_return_irq:
Paul Mackerras9994a332005-10-10 22:36:14 +1000589restore:
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100590 /*
591 * This is the main kernel exit path, we first check if we
592 * have to change our interrupt state.
593 */
Michael Ellerman01f38802008-07-16 14:21:34 +1000594 ld r5,SOFTE(r1)
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100595 lbz r6,PACASOFTIRQEN(r13)
596 cmpwi cr1,r5,0
597 cmpw cr0,r5,r6
598 beq cr0,4f
Paul Mackerras9994a332005-10-10 22:36:14 +1000599
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100600 /* We do, handle disable first, which is easy */
601 bne cr1,3f;
602 li r0,0
603 stb r0,PACASOFTIRQEN(r13);
604 TRACE_DISABLE_INTS
605 b 4f
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000606
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +11006073: /*
608 * We are about to soft-enable interrupts (we are hard disabled
609 * at this point). We check if there's anything that needs to
610 * be replayed first.
611 */
612 lbz r0,PACAIRQHAPPENED(r13)
613 cmpwi cr0,r0,0
614 bne- restore_check_irq_replay
615
616 /*
617 * Get here when nothing happened while soft-disabled, just
618 * soft-enable and move-on. We will hard-enable as a side
619 * effect of rfi
620 */
621restore_no_replay:
622 TRACE_ENABLE_INTS
623 li r0,1
624 stb r0,PACASOFTIRQEN(r13);
625
626 /*
627 * Final return path. BookE is handled in a different file
628 */
6294:
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000630#ifdef CONFIG_PPC_BOOK3E
631 b .exception_return_book3e
632#else
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100633 /*
634 * Clear the reservation. If we know the CPU tracks the address of
635 * the reservation then we can potentially save some cycles and use
636 * a larx. On POWER6 and POWER7 this is significantly faster.
637 */
638BEGIN_FTR_SECTION
639 stdcx. r0,0,r1 /* to clear the reservation */
640FTR_SECTION_ELSE
641 ldarx r4,0,r1
642ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
643
644 /*
645 * Some code path such as load_up_fpu or altivec return directly
646 * here. They run entirely hard disabled and do not alter the
647 * interrupt state. They also don't use lwarx/stwcx. and thus
648 * are known not to leave dangling reservations.
649 */
650 .globl fast_exception_return
651fast_exception_return:
652 ld r3,_MSR(r1)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100653 ld r4,_CTR(r1)
654 ld r0,_LINK(r1)
655 mtctr r4
656 mtlr r0
657 ld r4,_XER(r1)
658 mtspr SPRN_XER,r4
659
660 REST_8GPRS(5, r1)
661
662 andi. r0,r3,MSR_RI
663 beq- unrecov_restore
664
Anton Blanchardf89451f2010-08-11 01:40:27 +0000665 /*
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100666 * Clear RI before restoring r13. If we are returning to
667 * userspace and we take an exception after restoring r13,
668 * we end up corrupting the userspace r13 value.
669 */
Benjamin Herrenschmidtd9ada912012-03-02 11:33:52 +1100670 ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
671 andc r4,r4,r0 /* r0 contains MSR_RI here */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100672 mtmsrd r4,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000673
674 /*
675 * r13 is our per cpu area, only restore it if we are returning to
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100676 * userspace the value stored in the stack frame may belong to
677 * another CPU.
Paul Mackerras9994a332005-10-10 22:36:14 +1000678 */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100679 andi. r0,r3,MSR_PR
Paul Mackerras9994a332005-10-10 22:36:14 +1000680 beq 1f
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100681 ACCOUNT_CPU_USER_EXIT(r2, r4)
Paul Mackerras9994a332005-10-10 22:36:14 +1000682 REST_GPR(13, r1)
6831:
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100684 mtspr SPRN_SRR1,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000685
686 ld r2,_CCR(r1)
687 mtcrf 0xFF,r2
688 ld r2,_NIP(r1)
689 mtspr SPRN_SRR0,r2
690
691 ld r0,GPR0(r1)
692 ld r2,GPR2(r1)
693 ld r3,GPR3(r1)
694 ld r4,GPR4(r1)
695 ld r1,GPR1(r1)
696
697 rfid
698 b . /* prevent speculative execution */
699
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000700#endif /* CONFIG_PPC_BOOK3E */
701
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100702 /*
703 * Something did happen, check if a re-emit is needed
704 * (this also clears paca->irq_happened)
705 */
706restore_check_irq_replay:
707 /* XXX: We could implement a fast path here where we check
708 * for irq_happened being just 0x01, in which case we can
709 * clear it and return. That means that we would potentially
710 * miss a decrementer having wrapped all the way around.
711 *
712 * Still, this might be useful for things like hash_page
713 */
714 bl .__check_irq_replay
715 cmpwi cr0,r3,0
716 beq restore_no_replay
717
718 /*
719 * We need to re-emit an interrupt. We do so by re-using our
720 * existing exception frame. We first change the trap value,
721 * but we need to ensure we preserve the low nibble of it
722 */
723 ld r4,_TRAP(r1)
724 clrldi r4,r4,60
725 or r4,r4,r3
726 std r4,_TRAP(r1)
727
728 /*
729 * Then find the right handler and call it. Interrupts are
730 * still soft-disabled and we keep them that way.
731 */
732 cmpwi cr0,r3,0x500
733 bne 1f
734 addi r3,r1,STACK_FRAME_OVERHEAD;
735 bl .do_IRQ
736 b .ret_from_except
7371: cmpwi cr0,r3,0x900
738 bne 1f
739 addi r3,r1,STACK_FRAME_OVERHEAD;
740 bl .timer_interrupt
741 b .ret_from_except
742#ifdef CONFIG_PPC_BOOK3E
7431: cmpwi cr0,r3,0x280
744 bne 1f
745 addi r3,r1,STACK_FRAME_OVERHEAD;
746 bl .doorbell_exception
747 b .ret_from_except
748#endif /* CONFIG_PPC_BOOK3E */
7491: b .ret_from_except /* What else to do here ? */
750
Paul Mackerras9994a332005-10-10 22:36:14 +1000751do_work:
752#ifdef CONFIG_PREEMPT
753 andi. r0,r3,MSR_PR /* Returning to user mode? */
754 bne user_work
755 /* Check that preempt_count() == 0 and interrupts are enabled */
756 lwz r8,TI_PREEMPT(r9)
757 cmpwi cr1,r8,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000758 ld r0,SOFTE(r1)
759 cmpdi r0,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000760 crandc eq,cr1*4+eq,eq
761 bne restore
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000762
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100763 /*
764 * Here we are preempting the current task. We want to make
765 * sure we are soft-disabled first
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +1000766 */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100767 SOFT_DISABLE_INTS(r3,r4)
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +00007681: bl .preempt_schedule_irq
769
770 /* Hard-disable interrupts again (and update PACA) */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000771#ifdef CONFIG_PPC_BOOK3E
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000772 wrteei 0
773#else
Benjamin Herrenschmidtd9ada912012-03-02 11:33:52 +1100774 ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
Paul Mackerras9994a332005-10-10 22:36:14 +1000775 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000776#endif /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100777 li r0,PACA_IRQ_HARD_DIS
778 stb r0,PACAIRQHAPPENED(r13)
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000779
780 /* Re-test flags and eventually loop */
781 clrrdi r9,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000782 ld r4,TI_FLAGS(r9)
783 andi. r0,r4,_TIF_NEED_RESCHED
784 bne 1b
785 b restore
786
787user_work:
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000788#endif /* CONFIG_PREEMPT */
789
Paul Mackerras9994a332005-10-10 22:36:14 +1000790 /* Enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000791#ifdef CONFIG_PPC_BOOK3E
792 wrteei 1
793#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000794 ori r10,r10,MSR_EE
795 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000796#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000797
798 andi. r0,r4,_TIF_NEED_RESCHED
799 beq 1f
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100800 bl .restore_interrupts
Paul Mackerras9994a332005-10-10 22:36:14 +1000801 bl .schedule
802 b .ret_from_except_lite
803
8041: bl .save_nvgprs
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100805 bl .restore_interrupts
Roland McGrath7d6d6372008-07-27 16:52:52 +1000806 addi r3,r1,STACK_FRAME_OVERHEAD
Benjamin Herrenschmidt18b246f2012-02-22 16:48:32 +1100807 bl .do_notify_resume
Paul Mackerras9994a332005-10-10 22:36:14 +1000808 b .ret_from_except
809
810unrecov_restore:
811 addi r3,r1,STACK_FRAME_OVERHEAD
812 bl .unrecoverable_exception
813 b unrecov_restore
814
815#ifdef CONFIG_PPC_RTAS
816/*
817 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
818 * called with the MMU off.
819 *
820 * In addition, we need to be in 32b mode, at least for now.
821 *
822 * Note: r3 is an input parameter to rtas, so don't trash it...
823 */
/*
 * enter_rtas: drop to 32-bit real mode and call into RTAS.
 * r3 is the RTAS input parameter and is passed through untouched (see
 * the comment above).  Saves TOC/paca/non-volatile GPRs plus
 * CR/CTR/XER/DAR/DSISR on the stack, and r1/MSR in the PACA, because
 * 32-bit RTAS clobbers the high halves of anything it saves.
 * Returns to rtas_return_loc (in real mode) via LR.
 */
824_GLOBAL(enter_rtas)
825	mflr	r0
826	std	r0,16(r1)
827	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */
828
829	/* Because RTAS is running in 32b mode, it clobbers the high order half
830	 * of all registers that it saves. We therefore save those registers
831	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
832	 */
833	SAVE_GPR(2, r1)			/* Save the TOC */
834	SAVE_GPR(13, r1)		/* Save paca */
835	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
836	SAVE_10GPRS(22, r1)		/* ditto */
837
838	mfcr	r4
839	std	r4,_CCR(r1)
840	mfctr	r5
841	std	r5,_CTR(r1)
842	mfspr	r6,SPRN_XER
843	std	r6,_XER(r1)
844	mfdar	r7
845	std	r7,_DAR(r1)
846	mfdsisr	r8
847	std	r8,_DSISR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000848
Mike Kravetz9fe901d2006-03-27 15:20:00 -0800849	/* Temporary workaround to clear CR until RTAS can be modified to
850	 * ignore all bits.
851	 */
852	li	r0,0
853	mtcr	r0
854
David Woodhouse007d88d2007-01-01 18:45:34 +0000855#ifdef CONFIG_BUG
Paul Mackerras9994a332005-10-10 22:36:14 +1000856	/* There is no way it is acceptable to get here with interrupts enabled,
857	 * check it with the asm equivalent of WARN_ON
858	 */
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000859	lbz	r0,PACASOFTIRQEN(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +10008601:	tdnei	r0,0
David Woodhouse007d88d2007-01-01 18:45:34 +0000861	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
862#endif
863
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000864	/* Hard-disable interrupts */
865	mfmsr	r6
866	rldicl	r7,r6,48,1	/* rotate MSR_EE up to the top bit, clear it... */
867	rotldi	r7,r7,16	/* ...and rotate the MSR image back into place */
868	mtmsrd	r7,1
869
Paul Mackerras9994a332005-10-10 22:36:14 +1000870	/* Unfortunately, the stack pointer and the MSR are also clobbered,
871	 * so they are saved in the PACA which allows us to restore
872	 * our original state after RTAS returns.
873	 */
874	std	r1,PACAR1(r13)
875	std	r6,PACASAVEDMSR(r13)
876
877	/* Setup our real return addr */
David Gibsone58c3492006-01-13 14:56:25 +1100878	LOAD_REG_ADDR(r4,.rtas_return_loc)
879	clrldi	r4,r4,2			/* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000880	mtlr	r4
881
	/* r0 = current MSR with EE/SE/BE/RI cleared: installed immediately
	 * below so SRR0/SRR1 cannot be trashed before the rfid.
	 */
882	li	r0,0
883	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
884	andc	r0,r6,r0
885
	/* r6 = r0 with SF/IR/DR/FE0/FE1/FP/RI also cleared: the 32-bit
	 * real-mode MSR that RTAS is entered with (loaded into SRR1).
	 */
886	li	r9,1
887	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
Anton Blanchard44c9f3c2010-02-07 19:37:29 +0000888	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
Paul Mackerras9994a332005-10-10 22:36:14 +1000889	andc	r6,r0,r9
Paul Mackerras9994a332005-10-10 22:36:14 +1000890	sync				/* disable interrupts so SRR0/1 */
891	mtmsrd	r0			/* don't get trashed */
892
David Gibsone58c3492006-01-13 14:56:25 +1100893	LOAD_REG_ADDR(r4, rtas)
Paul Mackerras9994a332005-10-10 22:36:14 +1000894	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
895	ld	r4,RTASBASE(r4)		/* get the rtas->base value */
896
897	mtspr	SPRN_SRR0,r5
898	mtspr	SPRN_SRR1,r6
899	rfid
900	b	.	/* prevent speculative execution */
901
/*
 * rtas_return_loc: where RTAS returns to, still in real mode
 * (relocation off).  Recovers the PACA, clears MSR_RI while SRR0/SRR1
 * are being loaded, then rfids to rtas_restore_regs with the kernel
 * MSR that enter_rtas stashed in the PACA.
 */
902_STATIC(rtas_return_loc)
903	/* relocation is off at this point */
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100904	GET_PACA(r4)
David Gibsone58c3492006-01-13 14:56:25 +1100905	clrldi	r4,r4,2			/* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000906
	/* Position-independent load of &.rtas_restore_regs (stored at 1:
	 * below): bcl 20,31,$+4 puts the address of the next instruction
	 * in LR without being a real (predicted) function call.
	 */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000907	bcl	20,31,$+4
9080:	mflr	r3
909	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */
910
	/* Clear MSR_RI: the state in SRR0/SRR1 is about to become live */
Paul Mackerras9994a332005-10-10 22:36:14 +1000911	mfmsr	r6
912	li	r0,MSR_RI
913	andc	r6,r6,r0
914	sync
915	mtmsrd	r6
916
917	ld	r1,PACAR1(r4)           /* Restore our SP */
Paul Mackerras9994a332005-10-10 22:36:14 +1000918	ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */
919
920	mtspr	SPRN_SRR0,r3
921	mtspr	SPRN_SRR1,r4
922	rfid
923	b	.	/* prevent speculative execution */
924
Paul Mackerrase31aa452008-08-30 11:41:12 +1000925	.align	3
9261:	.llong	.rtas_restore_regs
927
/*
 * rtas_restore_regs: final leg of the RTAS return path, entered via
 * rfid with relocation back on.  Undoes everything enter_rtas saved:
 * TOC, paca, non-volatile GPRs, CR/CTR/XER/DAR/DSISR; then pops the
 * RTAS frame and returns to enter_rtas's caller.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000928_STATIC(rtas_restore_regs)
929	/* relocation is on at this point */
930	REST_GPR(2, r1)			/* Restore the TOC */
931	REST_GPR(13, r1)		/* Restore paca */
932	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
933	REST_10GPRS(22, r1)		/* ditto */
934
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100935	GET_PACA(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +1000936
937	ld	r4,_CCR(r1)
938	mtcr	r4
939	ld	r5,_CTR(r1)
940	mtctr	r5
941	ld	r6,_XER(r1)
942	mtspr	SPRN_XER,r6
943	ld	r7,_DAR(r1)
944	mtdar	r7
945	ld	r8,_DSISR(r1)
946	mtdsisr	r8
Paul Mackerras9994a332005-10-10 22:36:14 +1000947
948	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
949	ld	r0,16(r1)		/* get return address */
950
951	mtlr    r0
952	blr				/* return to caller */
953
954#endif /* CONFIG_PPC_RTAS */
955
/*
 * enter_prom: call into Open Firmware, which runs 32-bit.  r4 = PROM
 * entry point (moved into LR below).  Saves TOC, paca, non-volatile
 * GPRs, CR and MSR around the call because 32-bit OF clobbers the high
 * halves of any register it saves.
 * NOTE(review): r3 is presumably the OF argument array, passed through
 * untouched -- confirm against the caller.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000956_GLOBAL(enter_prom)
957	mflr	r0
958	std	r0,16(r1)
959	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */
960
961	/* Because PROM is running in 32b mode, it clobbers the high order half
962	 * of all registers that it saves. We therefore save those registers
963	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
964	 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000965	SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000966	SAVE_GPR(13, r1)
967	SAVE_8GPRS(14, r1)
968	SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000969	mfcr	r10
Paul Mackerras9994a332005-10-10 22:36:14 +1000970	mfmsr	r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000971	std	r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000972	std	r11,_MSR(r1)
973
974	/* Get the PROM entrypoint */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000975	mtlr	r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000976
977	/* Switch MSR to 32 bits mode
	 * (Book3E: clear the top word of the MSR image directly;
	 *  Book3S: clear MSR_SF and MSR_ISF bits individually)
978	 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000979#ifdef CONFIG_PPC_BOOK3E
980	rlwinm	r11,r11,0,1,31
981	mtmsr	r11
982#else /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000983	mfmsr	r11
984	li	r12,1
985	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
986	andc	r11,r11,r12
987	li	r12,1
988	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
989	andc	r11,r11,r12
990	mtmsrd	r11
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000991#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000992	isync
993
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000994	/* Enter PROM here... */
Paul Mackerras9994a332005-10-10 22:36:14 +1000995	blrl
996
997	/* Just make sure that r1 top 32 bits didn't get
998	 * corrupted by OF
999	 */
1000	rldicl	r1,r1,0,32
1001
1002	/* Restore the MSR (back to 64 bits) */
1003	ld	r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001004	MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +10001005	isync
1006
1007	/* Restore other registers */
1008	REST_GPR(2, r1)
1009	REST_GPR(13, r1)
1010	REST_8GPRS(14, r1)
1011	REST_10GPRS(22, r1)
1012	ld	r4,_CCR(r1)
1013	mtcr	r4
Paul Mackerras9994a332005-10-10 22:36:14 +10001014
1015	addi	r1,r1,PROM_FRAME_SIZE
1016	ld	r0,16(r1)
1017	mtlr    r0
1018	blr
Steven Rostedt4e491d12008-05-14 23:49:44 -04001019
Steven Rostedt606576c2008-10-06 19:06:12 -04001020#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -04001021#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * With CONFIG_DYNAMIC_FTRACE the default mcount/_mcount is a plain
 * return; traced call sites are patched at runtime to reach
 * ftrace_caller instead when tracing is enabled.
 */
1022_GLOBAL(mcount)
1023_GLOBAL(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -04001024	blr
1025
/*
 * ftrace_caller: dynamic-ftrace trace entry, reached from patched
 * mcount call sites.  On entry LR = address just past the call in the
 * traced function.  Builds r3 = call-site ip (LR - MCOUNT_INSN_SIZE)
 * and r4 = parent ip, then runs through the globally-visible
 * ftrace_call / ftrace_graph_call sites, which exist to be patched at
 * runtime by the ftrace code.
 */
1026_GLOBAL(ftrace_caller)
1027	/* Taken from output of objdump from lib64/glibc */
1028	mflr	r3
1029	ld	r11, 0(r1)	/* r11 = traced function's frame (back chain) */
1030	stdu	r1, -112(r1)
1031	std	r3, 128(r1)
1032	ld	r4, 16(r11)	/* r4 = parent ip (LR save slot of caller's frame) */
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301033	subi	r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001034.globl ftrace_call
1035ftrace_call:
	/* patched at runtime to call the active tracer */
1036	bl	ftrace_stub
1037	nop
Steven Rostedt46542882009-02-10 22:19:54 -08001038#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1039.globl ftrace_graph_call
1040ftrace_graph_call:
	/* patched at runtime to branch to ftrace_graph_caller */
1041	b	ftrace_graph_stub
1042_GLOBAL(ftrace_graph_stub)
1043#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001044	ld	r0, 128(r1)	/* pop our frame and return to the traced function */
1045	mtlr	r0
1046	addi	r1, r1, 112
1047_GLOBAL(ftrace_stub)
1048	blr
1049#else
/* mcount (non-dynamic build): nothing to do for plain mcount */
1050_GLOBAL(mcount)
1051	blr
1052
/*
 * _mcount (non-dynamic ftrace): called at the start of every traced
 * function.  Looks up the current tracer through ftrace_trace_function
 * and invokes it with r3 = call-site ip, r4 = parent ip; optionally
 * falls into ftrace_graph_caller for the function-graph tracer.
 */
1053_GLOBAL(_mcount)
1054	/* Taken from output of objdump from lib64/glibc */
1055	mflr	r3
1056	ld	r11, 0(r1)	/* r11 = traced function's frame (back chain) */
1057	stdu	r1, -112(r1)
1058	std	r3, 128(r1)
1059	ld	r4, 16(r11)	/* r4 = parent ip (LR save slot of caller's frame) */
1060
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301061	subi	r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001062	LOAD_REG_ADDR(r5,ftrace_trace_function)
1063	ld	r5,0(r5)	/* r5 = current tracer (ptr to function descriptor) */
1064	ld	r5,0(r5)	/* r5 = entry address out of the descriptor (ppc64 ABI) */
1065	mtctr	r5
1066	bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -04001067	nop
Steven Rostedt6794c782009-02-09 21:10:27 -08001068
1069
1070#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1071	b	ftrace_graph_caller
1072#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001073	ld	r0, 128(r1)	/* pop our frame and return to the traced function */
1074	mtlr	r0
1075	addi	r1, r1, 112
1076_GLOBAL(ftrace_stub)
1077	blr
1078
Steven Rostedt6794c782009-02-09 21:10:27 -08001079#endif /* CONFIG_DYNAMIC_FTRACE */
1080
1081#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * ftrace_graph_caller: entered from the mcount/ftrace_caller frame
 * (112-byte frame; call-site LR saved at 128(r1)).  Hands
 * .prepare_ftrace_return r3 = address of the traced function's LR save
 * slot (so the return address can be hooked) and r4 = call-site ip.
 */
Steven Rostedt46542882009-02-10 22:19:54 -08001082_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -08001083	/* load r4 with local address */
1084	ld	r4, 128(r1)
1085	subi	r4, r4, MCOUNT_INSN_SIZE
1086
1087	/* get the parent address: r11 = traced function's frame,
	 * r11+16 = its LR save slot
	 */
1088	ld	r11, 112(r1)
1089	addi	r3, r11, 16
1090
1091	bl	.prepare_ftrace_return
1092	nop
1093
1094	ld	r0, 128(r1)	/* pop the mcount frame and return */
1095	mtlr	r0
1096	addi	r1, r1, 112
1097	blr
1098
/*
 * return_to_handler: traced functions "return" here when the graph
 * tracer has hooked their return address.  Preserves the return-value
 * registers r3/r4 (and r31, used here to remember the old r1) around
 * .ftrace_return_to_handler, which hands back the real return address.
 */
1099_GLOBAL(return_to_handler)
1100	/* need to save return values */
Steven Rostedtbb725342009-02-11 12:45:49 -08001101	std	r4,  -24(r1)
1102	std	r3,  -16(r1)
1103	std	r31, -8(r1)
1104	mr	r31, r1
1105	stdu	r1, -112(r1)
1106
1107	bl	.ftrace_return_to_handler
1108	nop
1109
1110	/* return value has real return address */
1111	mtlr	r3
1112
1113	ld	r1, 0(r1)	/* unwind via the back chain */
1114	ld	r4,  -24(r1)
1115	ld	r3,  -16(r1)
1116	ld	r31, -8(r1)
1117
1118	/* Jump back to real return address */
1119	blr
1120
/*
 * mod_return_to_handler: module variant of return_to_handler.  Same
 * job, but additionally saves the module's TOC (r2) and switches to
 * the core kernel TOC from the PACA before calling the (core-kernel)
 * .ftrace_return_to_handler.
 */
1121_GLOBAL(mod_return_to_handler)
1122	/* need to save return values */
Steven Rostedt6794c782009-02-09 21:10:27 -08001123	std	r4,  -32(r1)
1124	std	r3,  -24(r1)
1125	/* save TOC */
1126	std	r2,  -16(r1)
1127	std	r31, -8(r1)
1128	mr	r31, r1
1129	stdu	r1, -112(r1)
1130
Steven Rostedtbb725342009-02-11 12:45:49 -08001131	/*
1132	 * We are in a module using the module's TOC.
1133	 * Switch to our TOC to run inside the core kernel.
1134	 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001135	ld	r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001136
1137	bl	.ftrace_return_to_handler
1138	nop
1139
1140	/* return value has real return address */
1141	mtlr	r3
1142
1143	ld	r1, 0(r1)	/* unwind via the back chain */
1144	ld	r4,  -32(r1)
1145	ld	r3,  -24(r1)
1146	ld	r2,  -16(r1)	/* restore the module TOC */
1147	ld	r31, -8(r1)
1148
1149	/* Jump back to real return address */
1150	blr
1151#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1152#endif /* CONFIG_FUNCTION_TRACER */