blob: 866462cbe2d832341d2cbf5b89d32be7986ec815 [file] [log] [blame]
Paul Mackerras9994a332005-10-10 22:36:14 +10001/*
Paul Mackerras9994a332005-10-10 22:36:14 +10002 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
22#include <asm/unistd.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/mmu.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100030#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000031#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100032#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100033#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053034#include <asm/ftrace.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100035
/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS
51
/*
 * system_call_common - common 64-bit system call entry/exit path.
 *
 * NOTE(review): register state on entry is set up by the 0xc00 exception
 * prolog, which is not visible in this file.  Inferred from the stores
 * below: r0 = syscall number, r3-r8 = arguments, r11 = NIP, r12 = MSR,
 * r13 = PACA, r9/r10 scratch -- confirm against the exception vectors.
 */
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR		/* coming from user mode? */
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f			/* from kernel: stay on current stack */
	ld	r1,PACAKSAVE(r13)	/* from user: switch to kernel stack */
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0			/* don't leak kernel values of r9-r12 */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01		/* trap number: syscall, NVGPRs not saved */
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)		/* kernel TOC for the C calls below */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	REST_GPR(0,r1)			/* C call clobbered the args; reload */
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)			/* reload syscall nr/args after C call */
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)	/* mark interrupts soft-enabled */
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	/* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT	/* current_thread_info() */
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32	/* zero-extend 32-bit user arguments */
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4		/* table stride is 16 bytes per syscall */
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif

	/* Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1	/* rotate EE into bit 63 and clear it */
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11		/* -_LAST_ERRNO..-1 means error return */
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3		/* return positive errno in r3, error in CR0.SO */
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit
	
syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r10 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12	/* atomic read-modify-write of ti->flags */
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000331
/* Save non-volatile GPRs (r14-r31), if not already saved.
 * Bit 0 of _TRAP is clear when the NVGPRs are already on the stack;
 * clearing it here records that this frame now has them saved.
 */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-				/* already saved: return */
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1		/* clear the "NVGPRs not saved" bit */
	std	r0,_TRAP(r1)
	blr
341
David Woodhouse401d1f02005-11-15 18:52:18 +0000342
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0			/* child returns 0 from fork/clone */
	b	syscall_exit
382
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR	/* r25 stays live until the DSCR compare below */
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
	and.	r0,r0,r22	/* FP/VEC/VSX currently enabled? */
	beq+	1f
	andc	r22,r22,r0	/* if so, turn them off across the switch */
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* !CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack.  */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	ld	r0,THREAD_DSCR(r4)
	cmpd	r0,r25			/* only write DSCR if it changed */
	beq	1f
	mtspr	SPRN_DSCR,r0
1:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
551
552 .align 7
553_GLOBAL(ret_from_except)
554 ld r11,_TRAP(r1)
555 andi. r0,r11,1
556 bne .ret_from_except_lite
557 REST_NVGPRS(r1)
558
559_GLOBAL(ret_from_except_lite)
560 /*
561 * Disable interrupts so that current_thread_info()->flags
562 * can't change between when we test it and when we return
563 * from the interrupt.
564 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000565#ifdef CONFIG_PPC_BOOK3E
566 wrteei 0
567#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000568 mfmsr r10 /* Get current interrupt state */
569 rldicl r9,r10,48,1 /* clear MSR_EE */
570 rotldi r9,r9,16
571 mtmsrd r9,1 /* Update machine state */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000572#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000573
574#ifdef CONFIG_PREEMPT
575 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
576 li r0,_TIF_NEED_RESCHED /* bits to check */
577 ld r3,_MSR(r1)
578 ld r4,TI_FLAGS(r9)
579 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
580 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
581 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
582 bne do_work
583
584#else /* !CONFIG_PREEMPT */
585 ld r3,_MSR(r1) /* Returning to user mode? */
586 andi. r3,r3,MSR_PR
587 beq restore /* if not, just restore regs and return */
588
589 /* Check current_thread_info()->flags */
590 clrrdi r9,r1,THREAD_SHIFT
591 ld r4,TI_FLAGS(r9)
592 andi. r0,r4,_TIF_USER_WORK_MASK
593 bne do_work
594#endif
595
596restore:
Stephen Rothwell3f639ee2006-09-25 18:19:00 +1000597BEGIN_FW_FTR_SECTION
Michael Ellerman01f38802008-07-16 14:21:34 +1000598 ld r5,SOFTE(r1)
599FW_FTR_SECTION_ELSE
Anton Blanchard917e4072009-10-18 01:24:29 +0000600 b .Liseries_check_pending_irqs
Michael Ellerman01f38802008-07-16 14:21:34 +1000601ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
6022:
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +1000603 TRACE_AND_RESTORE_IRQ(r5);
Paul Mackerras9994a332005-10-10 22:36:14 +1000604
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000605 /* extract EE bit and use it to restore paca->hard_enabled */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100606 ld r3,_MSR(r1)
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000607 rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
608 stb r4,PACAHARDIRQEN(r13)
609
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000610#ifdef CONFIG_PPC_BOOK3E
611 b .exception_return_book3e
612#else
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100613 ld r4,_CTR(r1)
614 ld r0,_LINK(r1)
615 mtctr r4
616 mtlr r0
617 ld r4,_XER(r1)
618 mtspr SPRN_XER,r4
619
620 REST_8GPRS(5, r1)
621
622 andi. r0,r3,MSR_RI
623 beq- unrecov_restore
624
Anton Blanchardf89451f2010-08-11 01:40:27 +0000625 /*
626 * Clear the reservation. If we know the CPU tracks the address of
627 * the reservation then we can potentially save some cycles and use
628 * a larx. On POWER6 and POWER7 this is significantly faster.
629 */
630BEGIN_FTR_SECTION
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100631 stdcx. r0,0,r1 /* to clear the reservation */
Anton Blanchardf89451f2010-08-11 01:40:27 +0000632FTR_SECTION_ELSE
633 ldarx r4,0,r1
634ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100635
636 /*
637 * Clear RI before restoring r13. If we are returning to
638 * userspace and we take an exception after restoring r13,
639 * we end up corrupting the userspace r13 value.
640 */
641 mfmsr r4
642 andc r4,r4,r0 /* r0 contains MSR_RI here */
643 mtmsrd r4,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000644
645 /*
646 * r13 is our per cpu area, only restore it if we are returning to
647 * userspace
648 */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100649 andi. r0,r3,MSR_PR
Paul Mackerras9994a332005-10-10 22:36:14 +1000650 beq 1f
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100651 ACCOUNT_CPU_USER_EXIT(r2, r4)
Paul Mackerras9994a332005-10-10 22:36:14 +1000652 REST_GPR(13, r1)
6531:
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100654 mtspr SPRN_SRR1,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000655
656 ld r2,_CCR(r1)
657 mtcrf 0xFF,r2
658 ld r2,_NIP(r1)
659 mtspr SPRN_SRR0,r2
660
661 ld r0,GPR0(r1)
662 ld r2,GPR2(r1)
663 ld r3,GPR3(r1)
664 ld r4,GPR4(r1)
665 ld r1,GPR1(r1)
666
667 rfid
668 b . /* prevent speculative execution */
669
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000670#endif /* CONFIG_PPC_BOOK3E */
671
Anton Blanchard917e4072009-10-18 01:24:29 +0000672.Liseries_check_pending_irqs:
Michael Ellerman01f38802008-07-16 14:21:34 +1000673#ifdef CONFIG_PPC_ISERIES
674 ld r5,SOFTE(r1)
675 cmpdi 0,r5,0
676 beq 2b
677 /* Check for pending interrupts (iSeries) */
678 ld r3,PACALPPACAPTR(r13)
679 ld r3,LPPACAANYINT(r3)
680 cmpdi r3,0
681 beq+ 2b /* skip do_IRQ if no interrupts */
682
683 li r3,0
684 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
685#ifdef CONFIG_TRACE_IRQFLAGS
686 bl .trace_hardirqs_off
687 mfmsr r10
688#endif
689 ori r10,r10,MSR_EE
690 mtmsrd r10 /* hard-enable again */
691 addi r3,r1,STACK_FRAME_OVERHEAD
692 bl .do_IRQ
693 b .ret_from_except_lite /* loop back and handle more */
694#endif
695
Paul Mackerras9994a332005-10-10 22:36:14 +1000696do_work:
697#ifdef CONFIG_PREEMPT
698 andi. r0,r3,MSR_PR /* Returning to user mode? */
699 bne user_work
700 /* Check that preempt_count() == 0 and interrupts are enabled */
701 lwz r8,TI_PREEMPT(r9)
702 cmpwi cr1,r8,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000703 ld r0,SOFTE(r1)
704 cmpdi r0,0
Paul Mackerras9994a332005-10-10 22:36:14 +1000705 crandc eq,cr1*4+eq,eq
706 bne restore
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000707
708 /* Here we are preempting the current task.
709 *
710 * Ensure interrupts are soft-disabled. We also properly mark
711 * the PACA to reflect the fact that they are hard-disabled
712 * and trace the change
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +1000713 */
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000714 li r0,0
Paul Mackerrasd04c56f2006-10-04 16:47:49 +1000715 stb r0,PACASOFTIRQEN(r13)
716 stb r0,PACAHARDIRQEN(r13)
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000717 TRACE_DISABLE_INTS
718
719 /* Call the scheduler with soft IRQs off */
7201: bl .preempt_schedule_irq
721
722 /* Hard-disable interrupts again (and update PACA) */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000723#ifdef CONFIG_PPC_BOOK3E
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000724 wrteei 0
725#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000726 mfmsr r10
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000727 rldicl r10,r10,48,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000728 rotldi r10,r10,16
729 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000730#endif /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000731 li r0,0
732 stb r0,PACAHARDIRQEN(r13)
733
734 /* Re-test flags and eventually loop */
735 clrrdi r9,r1,THREAD_SHIFT
Paul Mackerras9994a332005-10-10 22:36:14 +1000736 ld r4,TI_FLAGS(r9)
737 andi. r0,r4,_TIF_NEED_RESCHED
738 bne 1b
739 b restore
740
741user_work:
Benjamin Herrenschmidt4f917ba2009-10-26 19:41:17 +0000742#endif /* CONFIG_PREEMPT */
743
Paul Mackerras9994a332005-10-10 22:36:14 +1000744 /* Enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000745#ifdef CONFIG_PPC_BOOK3E
746 wrteei 1
747#else
Paul Mackerras9994a332005-10-10 22:36:14 +1000748 ori r10,r10,MSR_EE
749 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000750#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000751
752 andi. r0,r4,_TIF_NEED_RESCHED
753 beq 1f
Benjamin Herrenschmidt18b246f2012-02-22 16:48:32 +1100754 li r5,1
755 TRACE_AND_RESTORE_IRQ(r5);
Paul Mackerras9994a332005-10-10 22:36:14 +1000756 bl .schedule
757 b .ret_from_except_lite
758
7591: bl .save_nvgprs
Benjamin Herrenschmidt18b246f2012-02-22 16:48:32 +1100760 li r5,1
761 TRACE_AND_RESTORE_IRQ(r5);
Roland McGrath7d6d6372008-07-27 16:52:52 +1000762 addi r3,r1,STACK_FRAME_OVERHEAD
Benjamin Herrenschmidt18b246f2012-02-22 16:48:32 +1100763 bl .do_notify_resume
Paul Mackerras9994a332005-10-10 22:36:14 +1000764 b .ret_from_except
765
766unrecov_restore:
767 addi r3,r1,STACK_FRAME_OVERHEAD
768 bl .unrecoverable_exception
769 b unrecov_restore
770
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
        stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
        std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
       	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

        li      r9,1
        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */
857
/*
 * rtas_return_loc: return trampoline from RTAS.
 *
 * RTAS branches back here in real mode (relocation off).  We recover the
 * PACA pointer, locate rtas_restore_regs position-independently, disable
 * MSR_RI (SRR0/SRR1 are about to be clobbered), reload the kernel stack
 * pointer and saved MSR from the PACA, then rfid into rtas_restore_regs
 * with translation re-enabled.
 */
858_STATIC(rtas_return_loc)
859 /* relocation is off at this point */
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100860 GET_PACA(r4)
David Gibsone58c3492006-01-13 14:56:25 +1100861 clrldi r4,r4,2 /* convert to realmode address */
Paul Mackerras9994a332005-10-10 22:36:14 +1000862
 /* PIC trick: bcl 20,31 sets LR to the next instruction so we can
 * fetch the absolute address of rtas_restore_regs from the literal
 * word at 1: below, without relying on relocation being on.
 */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000863 bcl 20,31,$+4
8640: mflr r3
865 ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */
866
 /* Clear MSR_RI: from here until rfid, SRR0/SRR1 hold live state and
 * a recursive exception would be unrecoverable.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000867 mfmsr r6
868 li r0,MSR_RI
869 andc r6,r6,r0
870 sync
871 mtmsrd r6
872
873 ld r1,PACAR1(r4) /* Restore our SP */
Paul Mackerras9994a332005-10-10 22:36:14 +1000874 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
875
 /* rfid to rtas_restore_regs with the MSR saved by enter_rtas */
876 mtspr SPRN_SRR0,r3
877 mtspr SPRN_SRR1,r4
878 rfid
879 b . /* prevent speculative execution */
880
 /* 8-byte-aligned literal holding the address of rtas_restore_regs,
 * read via the LR trick above.
 */
Paul Mackerrase31aa452008-08-30 11:41:12 +1000881 .align 3
8821: .llong .rtas_restore_regs
883
Paul Mackerras9994a332005-10-10 22:36:14 +1000884_STATIC(rtas_restore_regs)
885 /* relocation is on at this point */
886 REST_GPR(2, r1) /* Restore the TOC */
887 REST_GPR(13, r1) /* Restore paca */
888 REST_8GPRS(14, r1) /* Restore the non-volatiles */
889 REST_10GPRS(22, r1) /* ditto */
890
Benjamin Herrenschmidt2dd60d72011-01-20 17:50:21 +1100891 GET_PACA(r13)
Paul Mackerras9994a332005-10-10 22:36:14 +1000892
893 ld r4,_CCR(r1)
894 mtcr r4
895 ld r5,_CTR(r1)
896 mtctr r5
897 ld r6,_XER(r1)
898 mtspr SPRN_XER,r6
899 ld r7,_DAR(r1)
900 mtdar r7
901 ld r8,_DSISR(r1)
902 mtdsisr r8
Paul Mackerras9994a332005-10-10 22:36:14 +1000903
904 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
905 ld r0,16(r1) /* get return address */
906
907 mtlr r0
908 blr /* return to caller */
909
910#endif /* CONFIG_PPC_RTAS */
911
/*
 * enter_prom: call into Open Firmware (PROM).
 *
 * In:  r3 = argument passed through to PROM (left untouched here),
 *      r4 = PROM entry point (moved to LR and called via blrl).
 *
 * PROM runs in 32-bit mode, so the high halves of any register it saves
 * are lost: we stash r2, r13, r14-r31, CR and the MSR in our own frame,
 * drop the MSR to 32-bit mode around the call, then restore everything.
 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000912_GLOBAL(enter_prom)
913 mflr r0
914 std r0,16(r1)
915 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
916
917 /* Because PROM is running in 32b mode, it clobbers the high order half
918 * of all registers that it saves. We therefore save those registers
919 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
920 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000921 SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000922 SAVE_GPR(13, r1)
923 SAVE_8GPRS(14, r1)
924 SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000925 mfcr r10
Paul Mackerras9994a332005-10-10 22:36:14 +1000926 mfmsr r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000927 std r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000928 std r11,_MSR(r1)
929
930 /* Get the PROM entrypoint */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000931 mtlr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000932
933 /* Switch MSR to 32 bits mode
934 */
 /* Book3E: clear the computation-mode bit (bit 0) with rlwinm/mtmsr.
 * Book3S: clear MSR_SF and MSR_ISF individually with mtmsrd.
 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000935#ifdef CONFIG_PPC_BOOK3E
936 rlwinm r11,r11,0,1,31
937 mtmsr r11
938#else /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000939 mfmsr r11
940 li r12,1
941 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
942 andc r11,r11,r12
943 li r12,1
944 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
945 andc r11,r11,r12
946 mtmsrd r11
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000947#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000948 isync
949
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000950 /* Enter PROM here... */
Paul Mackerras9994a332005-10-10 22:36:14 +1000951 blrl
952
953 /* Just make sure that r1 top 32 bits didn't get
954 * corrupt by OF
955 */
956 rldicl r1,r1,0,32
957
958 /* Restore the MSR (back to 64 bits) */
959 ld r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +0000960 MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +1000961 isync
962
963 /* Restore other registers */
964 REST_GPR(2, r1)
965 REST_GPR(13, r1)
966 REST_8GPRS(14, r1)
967 REST_10GPRS(22, r1)
968 ld r4,_CCR(r1)
969 mtcr r4
Paul Mackerras9994a332005-10-10 22:36:14 +1000970
 /* Pop our frame and return; PROM's result is in r3 (untouched) */
971 addi r1,r1,PROM_FRAME_SIZE
972 ld r0,16(r1)
973 mtlr r0
974 blr
Steven Rostedt4e491d12008-05-14 23:49:44 -0400975
Steven Rostedt606576c2008-10-06 19:06:12 -0400976#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -0400977#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * mcount/_mcount (CONFIG_DYNAMIC_FTRACE): a bare blr.  With dynamic
 * ftrace the compiler-emitted mcount call sites are presumably patched
 * at runtime to call ftrace_caller instead — TODO confirm against
 * arch ftrace code; this stub itself just returns.
 */
978_GLOBAL(mcount)
979_GLOBAL(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -0400980 blr
981
/*
 * ftrace_caller: dynamic-ftrace entry, reached from patched mcount
 * call sites.
 *
 * Builds a 112-byte frame, saves LR (the traced function's address
 * just past the mcount call) at 128(r1), fetches the parent's return
 * address from the caller's frame (16(r11)) into r4, and computes the
 * call-site address in r3 by backing up MCOUNT_INSN_SIZE.  The
 * `bl ftrace_stub` at ftrace_call is the site runtime code patches to
 * call the active tracer; likewise ftrace_graph_call for the graph
 * tracer.
 */
982_GLOBAL(ftrace_caller)
983 /* Taken from output of objdump from lib64/glibc */
984 mflr r3
985 ld r11, 0(r1) /* r11 = caller's stack frame (back chain) */
986 stdu r1, -112(r1)
987 std r3, 128(r1) /* save LR in our frame (112 + 16) */
988 ld r4, 16(r11) /* r4 = parent's saved LR */
Abhishek Sagar395a59d2008-06-21 23:47:27 +0530989 subi r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
991ftrace_call:
992 bl ftrace_stub /* patched at runtime to call the tracer */
993 nop
Steven Rostedt46542882009-02-10 22:19:54 -0800994#ifdef CONFIG_FUNCTION_GRAPH_TRACER
995.globl ftrace_graph_call
996ftrace_graph_call:
997 b ftrace_graph_stub /* patched to b ftrace_graph_caller */
998_GLOBAL(ftrace_graph_stub)
999#endif
 /* Restore LR and tear down the frame; ftrace_stub doubles as the
 * default (no-op) patch target.
 */
Steven Rostedt4e491d12008-05-14 23:49:44 -04001000 ld r0, 128(r1)
1001 mtlr r0
1002 addi r1, r1, 112
1003_GLOBAL(ftrace_stub)
1004 blr
1005#else
/* mcount (non-dynamic ftrace): profiling entry used only as a no-op;
 * the real work is done by _mcount below.
 */
1006_GLOBAL(mcount)
1007 blr
1008
/*
 * _mcount (non-dynamic ftrace): called from every compiler-emitted
 * mcount site when CONFIG_DYNAMIC_FTRACE is off.
 *
 * Same frame/argument setup as ftrace_caller (r3 = call-site address,
 * r4 = parent's return address), then calls through the global
 * ftrace_trace_function pointer via CTR.
 */
1009_GLOBAL(_mcount)
1010 /* Taken from output of objdump from lib64/glibc */
1011 mflr r3
1012 ld r11, 0(r1) /* r11 = caller's frame (back chain) */
1013 stdu r1, -112(r1)
1014 std r3, 128(r1) /* save LR in our frame */
1015 ld r4, 16(r11) /* r4 = parent's saved LR */
1016
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301017 subi r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001018 LOAD_REG_ADDR(r5,ftrace_trace_function)
1019 ld r5,0(r5) /* load the function pointer variable */
1020 ld r5,0(r5) /* deref ppc64 function descriptor -> entry addr */
1021 mtctr r5
1022 bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -04001023 nop
Steven Rostedt6794c782009-02-09 21:10:27 -08001024
1025
 /* Tail off into the graph caller (which restores the frame itself)
 * when the graph tracer is built in; otherwise unwind here.
 */
1026#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1027 b ftrace_graph_caller
1028#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001029 ld r0, 128(r1)
1030 mtlr r0
1031 addi r1, r1, 112
1032_GLOBAL(ftrace_stub)
1033 blr
1034
Steven Rostedt6794c782009-02-09 21:10:27 -08001035#endif /* CONFIG_DYNAMIC_FTRACE */
1036
1037#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Steven Rostedt46542882009-02-10 22:19:54 -08001038_GLOBAL(ftrace_graph_caller)
Steven Rostedt6794c782009-02-09 21:10:27 -08001039 /* load r4 with local address */
1040 ld r4, 128(r1)
1041 subi r4, r4, MCOUNT_INSN_SIZE
1042
1043 /* get the parent address */
1044 ld r11, 112(r1)
1045 addi r3, r11, 16
1046
1047 bl .prepare_ftrace_return
1048 nop
1049
1050 ld r0, 128(r1)
1051 mtlr r0
1052 addi r1, r1, 112
1053 blr
1054
/*
 * return_to_handler: substituted return address for graph-traced
 * in-kernel functions.
 *
 * Preserves the function's return values (r3/r4) and r31 in the
 * caller-invisible area below r1 before building a frame, calls
 * ftrace_return_to_handler to obtain the original return address,
 * then restores the values and branches there via LR.
 */
1055_GLOBAL(return_to_handler)
1056 /* need to save return values */
Steven Rostedtbb725342009-02-11 12:45:49 -08001057 std r4, -24(r1)
1058 std r3, -16(r1)
1059 std r31, -8(r1)
1060 mr r31, r1 /* r31 = original r1, to find the saves again */
1061 stdu r1, -112(r1)
1062
1063 bl .ftrace_return_to_handler
1064 nop
1065
1066 /* return value has real return address */
1067 mtlr r3
1068
 /* pop our frame via the back chain, then reload the saved values */
1069 ld r1, 0(r1)
1070 ld r4, -24(r1)
1071 ld r3, -16(r1)
1072 ld r31, -8(r1)
1073
1074 /* Jump back to real return address */
1075 blr
1076
/*
 * mod_return_to_handler: like return_to_handler, but for graph-traced
 * functions living in modules.
 *
 * Additionally saves/restores the TOC pointer (r2) and switches to the
 * core kernel's TOC from the PACA before calling
 * ftrace_return_to_handler, since the traced code was running with the
 * module's TOC.
 */
1077_GLOBAL(mod_return_to_handler)
1078 /* need to save return values */
Steven Rostedt6794c782009-02-09 21:10:27 -08001079 std r4, -32(r1)
1080 std r3, -24(r1)
1081 /* save TOC */
1082 std r2, -16(r1)
1083 std r31, -8(r1)
1084 mr r31, r1 /* r31 = original r1, to find the saves again */
1085 stdu r1, -112(r1)
1086
Steven Rostedtbb725342009-02-11 12:45:49 -08001087 /*
1088 * We are in a module using the module's TOC.
1089 * Switch to our TOC to run inside the core kernel.
1090 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001091 ld r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001092
1093 bl .ftrace_return_to_handler
1094 nop
1095
1096 /* return value has real return address */
1097 mtlr r3
1098
 /* pop our frame via the back chain, then reload values and the TOC */
1099 ld r1, 0(r1)
1100 ld r4, -32(r1)
1101 ld r3, -24(r1)
1102 ld r2, -16(r1)
1103 ld r31, -8(r1)
1104
1105 /* Jump back to real return address */
1106 blr
1107#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1108#endif /* CONFIG_FUNCTION_TRACER */