/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

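/*
 * Usage note (illustrative, not from the original source):
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to a single "li" when
 * MSR_KERNEL fits in 16 bits, or to the two-instruction lis/ori pair
 * when 4xx/Book-E's MSR_CE pushes the value above 0xffff.
 */
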
#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* save the old stack limit and set a new limit that
	 * protects the thread_info struct at the base of the
	 * current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* save the old stack limit and set a new limit that
	 * protects the thread_info struct at the base of the
	 * current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
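/*
 * Illustrative note (an assumption from the usual head_32.S layout,
 * not stated here): callers reach this code via the EXC_XFER_* macros,
 * which place two words immediately after the "bl":
 *
 *	bl	transfer_to_handler
 *	.long	hdlr			- virtual address of the C handler
 *	.long	ret_from_except		- where to go when the handler returns
 *
 * transfer_to_handler_cont below fetches both words through the saved LR.
 */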
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * trace_hardirqs_off uses CALLER_ADDR0 and CALLER_ADDR1.
	 * If we came from user mode there is only one stack frame on the
	 * stack, and accessing CALLER_ADDR1 will cause an oops, so we
	 * need to create a dummy stack frame to keep trace_hardirqs_off
	 * happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can differ from GPR3(r1) at this point, r9 and r11 contain
	 * the old MSR and the handler address respectively, and r4 & r5
	 * can hold page fault arguments that need to be passed along as
	 * well.  r12, CCR, CTR, XER etc... are left clobbered as they
	 * aren't useful past this point (they aren't syscall arguments);
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

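/*
 * Illustrative note: on 32-bit PowerPC Linux, userspace enters here via
 * the "sc" instruction with the syscall number in r0 and up to six
 * arguments in r3-r8.  On return, r3 holds the result; an error is
 * signalled by setting CR0.SO with a positive errno value in r3.
 */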
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Returning from a syscall can (and generally will) hard-enable
	 * interrupts.  You aren't supposed to invoke a syscall with
	 * interrupts disabled in the first place.  However, to ensure
	 * that we get it right vs. lockdep if it happens, we force a
	 * hard enable here, with appropriate tracing, if we see that
	 * we were called with interrupts off.
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here.  It shouldn't happen, but we want
	 * to catch it if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

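/*
 * Illustrative note (an assumption from copy_thread() in process.c,
 * not stated here): for a kernel thread, copy_thread() stashes the
 * thread function in r14 and its argument in r15; both are
 * non-volatile, so they survive _switch and schedule_tail above.
 */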
	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	do_exit		# no return

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
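/*
 * Illustrative note (an assumption from the usual exception vectors,
 * not stated here): the DSI/ISI handlers load r4 with the faulting
 * address (DAR, or SRR0 for instruction faults) and r5 with the error
 * bits before branching here; do_page_fault() is then called as
 * do_page_fault(regs, address, error_code).
 */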
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
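/*
 * Illustrative note (an assumption about the C caller, not stated
 * here): __switch_to() in process.c calls this roughly as
 * last = _switch(old_thread, new_thread); the "mr r3,r2" below makes
 * the old 'current' the return value seen when the new task resumes.
 */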
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds like overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

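/*
 * Illustrative expansion: RESTORE_xSRR(CSRR0,CSRR1) becomes
 *	lwz	r9,_CSRR0(r1);   lwz	r10,_CSRR1(r1);
 *	mtspr	SPRN_CSRR0,r9;   mtspr	SPRN_CSRR1,r10;
 * i.e. it reloads a saved pair of exception-level save/restore
 * registers from the exception frame.
 */
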
#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

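/*
 * Illustrative note (an interpretation of the code above, not stated
 * in the original): global_dbcr0 holds two words per CPU (hence the
 * "slwi r9,r9,3" scaling): word 0 is the saved global DBCR0 value and
 * word 1 appears to act as a count, incremented by load_dbcr0 and
 * decremented in transfer_to_handler when the global value is loaded.
 */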
	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
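/*
 * Illustrative note (an assumption about the C caller, not stated
 * here): rtas_call() in rtas.c invokes this as
 * enter_rtas(__pa(&rtas.args)), so r3 arrives holding the physical
 * address of the RTAS argument buffer and is passed through to the
 * RTAS entry point untouched.
 */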
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register. But we have r0 to play with. We use r0
	 * to push the return address back to the caller of mcount
	 * into the ctr register, restore the link register and
	 * then jump back using the ctr register.
	 */
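	/*
	 * Illustrative note (an assumption about the compiler, not stated
	 * here): with -pg, gcc's prologue typically emits
	 * "mflr r0; stw r0,4(r1); bl _mcount", so the instrumented
	 * function's real return address is already saved at 4(r1).
	 */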
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */