/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
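/*
 * Note: li can only load a 16-bit signed immediate, so when MSR_KERNEL
 * has bits above 0xffff set (e.g. MSR_CE on 4xx/Book-E) the value has to
 * be built with the lis/ori pair above instead.
 */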

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	CURRENT_THREAD_INFO(r0, r1)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	CURRENT_THREAD_INFO(r0, r1)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
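/*
 * Register usage on entry (as consumed below): r11 points to the
 * exception frame, r9 holds the MSR at the time of the exception,
 * r12 holds the interrupted NIP, and r10 holds the MSR value the
 * handler will run with; LR points to a word pair giving the handler
 * address and the address to return to afterwards.
 */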
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If we came from user mode there is only one stack frame on the
	 * stack, and accessing CALLER_ADDR1 will cause an oops, so we need
	 * to create a dummy stack frame to keep trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and the handler address respectively,
	 * and r4 & r5 can contain page fault arguments that need to be
	 * passed along as well. r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point (they aren't
	 * syscall arguments); the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
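/*
 * PPC32 syscall convention as used below: the syscall number arrives in
 * r0, the arguments in r3-r8, and the result is returned in r3.  A failed
 * call is reported by setting the SO bit in CR0 and returning the
 * (positive) errno value in r3; see ret_from_syscall.
 */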
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off.
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
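	/* r3 holds the handler's return value here; anything in the range
	 * [-_LAST_ERRNO, -1] is an error, so negate it and set CR0.SO for
	 * the userspace error check. */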
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we want
	 * to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
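/*
 * The low bit of the saved trap number (_TRAP) is used as a flag: it is
 * set while only the volatile registers have been saved, and cleared
 * (as done below) once the full register set, including r13-r31, is in
 * the exception frame.
 */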
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
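/*
 * Viewed from C this is roughly:
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 * where the value returned in r3 is the 'current' of the task being
 * switched away from, used as 'last' by the scheduler.  (Prototype
 * shown for orientation only; the authoritative declaration lives in
 * the C code.)
 */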
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs, but that sounds like overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

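/*
 * Common exit path for the extra exception levels (critical, debug,
 * machine check): restore the full register set from the exception
 * frame and return via the level-specific SRR pair and rfi variant
 * passed in as arguments.
 */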
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
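/*
 * global_dbcr0 (defined below) holds two words per CPU: the saved
 * global DBCR0 value at offset 0 and a use count at offset 4, which is
 * why TI_CPU is scaled by 8 before being added to the base address.
 */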
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it that we are disabling them again
	 * either. The disable/enable cycles used to peek at TI_FLAGS
	 * aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register. But we have r0 to play with. We use r0
	 * to push the return address back to the caller of mcount
	 * into the ctr register, restore the link register and
	 * then jump back using the ctr register.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

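/*
 * return_to_handler is the trampoline the function graph tracer
 * substitutes for a traced function's return address; the call to
 * ftrace_return_to_handler() hands back the original return address,
 * which we then jump to.
 */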
_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */