/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
21
Paul Mackerras9994a332005-10-10 22:36:14 +100022#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053033#include <asm/ftrace.h>
Stephen Rothwell46f52212010-11-18 15:06:17 +000034#include <asm/ptrace.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100035
36#undef SHOW_SYSCALLS
37#undef SHOW_SYSCALLS_TASK
38
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
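/*
 * For example, LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to a single
 * "li r10,MSR_KERNEL" when the value fits in a 16-bit signed immediate,
 * and to the two-instruction lis/ori form above otherwise.
 */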

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* save the old stack limit, then set the limit so that it
	 * protects the thread_info struct at the base of the
	 * current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
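	/* crit_r10/crit_r11 and crit_srr0/crit_srr1 are assumed to live
	 * in the low-memory area set up by the 40x head code, so they
	 * can be addressed with a zero base register and an @l offset
	 * while translation is off.
	 */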
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* save the old stack limit, then set the limit so that it
	 * protects the thread_info struct at the base of the
	 * current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
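/*
 * Register conventions at this point, as set up by the exception
 * prologues (a summary inferred from the code below): r11 points to
 * the exception frame, r12 holds the interrupted NIP (old SRR0),
 * r9 the interrupted MSR (old SRR1), and r10 the MSR value the
 * handler will eventually run with.
 */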
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * trace_hardirqs_off uses CALLER_ADDR0 and CALLER_ADDR1.
	 * If we came from user mode there is only one stack frame on the
	 * stack, and accessing CALLER_ADDR1 will cause an oops. So we
	 * need to create a dummy stack frame to keep trace_hardirqs_off
	 * happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can differ from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and the handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r12, CCR, CTR, XER etc... are left clobbered,
	 * as they aren't useful past this point (they aren't syscall
	 * arguments); the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
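/*
 * A short reminder of the PPC32 Linux syscall convention (visible in
 * the code below, not a normative reference): the syscall number
 * arrives in r0, arguments in r3-r8, and the result is returned in
 * r3, with CR0.SO set when the return value is an error code.
 */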
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we
	 * want to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	stw	r3,0(r1)
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore the argument registers, which may just have been
	 * changed. We use the return value of do_syscall_trace_enter
	 * as the call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */
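	/* The lwarx/stwcx. pair below is the usual atomic
	 * read-modify-write: reload TI_FLAGS with a reservation,
	 * clear the per-syscall bits, and retry if the store
	 * loses the reservation to a concurrent update.
	 */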

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
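/*
 * A note on the convention used below: the low bit of the saved _TRAP
 * word acts as a flag, 1 meaning only the volatile registers are in
 * the frame and 0 meaning the full register set (r13-r31 included)
 * has been saved. Clearing it here records that the NVGPRS are saved.
 */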
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
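	/* On entry (a summary inferred from the code below): r11 points
	 * to the exception frame, r9 holds the MSR to restore and r12
	 * the NIP to return to; r9/r12 are moved into SRR1/SRR0 before
	 * the final RFI.
	 */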
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know that IRQs are temporarily turned off in this
	 * assembly code while peeking at TI_FLAGS() and such. However, we
	 * need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here, sadly, is that we don't know whether the
	 * exception was one that turned interrupts off or not. So we always
	 * tell lockdep about turning them on here when we go back to
	 * wherever we came from with EE on, even if that may mean some
	 * redundant calls being tracked. Maybe later we could encode what
	 * the exception did somewhere, or test the exception type in the
	 * pt_regs, but that sounds like overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we     \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

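/*
 * RET_FROM_EXC_LEVEL's arguments name the SPR pair that holds the
 * saved PC/MSR for the exception level being returned from, plus the
 * matching return-from-interrupt instruction; see the users below,
 * e.g. RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) for
 * critical interrupts.
 */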
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)	\
	lwz	r9,_##exc_lvl_srr0(r1);			\
	lwz	r10,_##exc_lvl_srr1(r1);		\
	mtspr	SPRN_##exc_lvl_srr0,r9;			\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7				\
	lwz	r11,MAS7(r1);			\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS			\
	lwz	r9,MAS0(r1);			\
	lwz	r10,MAS1(r1);			\
	lwz	r11,MAS2(r1);			\
	mtspr	SPRN_MAS0,r9;			\
	lwz	r9,MAS3(r1);			\
	mtspr	SPRN_MAS1,r10;			\
	lwz	r10,MAS6(r1);			\
	mtspr	SPRN_MAS2,r11;			\
	mtspr	SPRN_MAS3,r9;			\
	mtspr	SPRN_MAS6,r10;			\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS			\
	lwz	r9,MMUCR(r1);			\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
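/*
 * global_dbcr0 is laid out as 8 bytes per CPU (see the .bss definition
 * below): the saved DBCR0 value at offset 0 and a use count at offset
 * 4, which is why TI_CPU is shifted left by 3 when indexing it.
 */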
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled.
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
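/*
 * A note inferred from the callers (this routine never touches r3):
 * r3 is assumed to carry the physical address of the RTAS argument
 * buffer and is passed straight through to the RTAS entry point.
 */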
1325_GLOBAL(enter_rtas)
1326 stwu r1,-INT_FRAME_SIZE(r1)
1327 mflr r0
1328 stw r0,INT_FRAME_SIZE+4(r1)
David Gibsone58c3492006-01-13 14:56:25 +11001329 LOAD_REG_ADDR(r4, rtas)
Paul Mackerras9994a332005-10-10 22:36:14 +10001330 lis r6,1f@ha /* physical return address for rtas */
1331 addi r6,r6,1f@l
1332 tophys(r6,r6)
1333 tophys(r7,r1)
Paul Mackerras033ef332005-10-26 17:05:24 +10001334 lwz r8,RTASENTRY(r4)
1335 lwz r4,RTASBASE(r4)
Paul Mackerras9994a332005-10-10 22:36:14 +10001336 mfmsr r9
1337 stw r9,8(r1)
1338 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1339 SYNC /* disable interrupts so SRR0/1 */
1340 MTMSRD(r0) /* don't get trashed */
1341 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1342 mtlr r6
Benjamin Herrenschmidtee43eb72009-07-14 20:52:54 +00001343 mtspr SPRN_SPRG_RTAS,r7
Paul Mackerras9994a332005-10-10 22:36:14 +10001344 mtspr SPRN_SRR0,r8
1345 mtspr SPRN_SRR1,r9
1346 RFI
13471: tophys(r9,r1)
1348 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1349 lwz r9,8(r9) /* original msr value */
1350 FIX_SRR1(r9,r0)
1351 addi r1,r1,INT_FRAME_SIZE
1352 li r0,0
Benjamin Herrenschmidtee43eb72009-07-14 20:52:54 +00001353 mtspr SPRN_SPRG_RTAS,r0
Paul Mackerras9994a332005-10-10 22:36:14 +10001354 mtspr SPRN_SRR0,r8
1355 mtspr SPRN_SRR1,r9
1356 RFI /* return to caller */
1357
1358 .globl machine_check_in_rtas
1359machine_check_in_rtas:
1360 twi 31,0,0
1361 /* XXX load up BATs and panic */
1362
Paul Mackerras033ef332005-10-26 17:05:24 +10001363#endif /* CONFIG_PPC_RTAS */
Steven Rostedt4e491d12008-05-14 23:49:44 -04001364
Steven Rostedt606576c2008-10-06 19:06:12 -04001365#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -04001366#ifdef CONFIG_DYNAMIC_FTRACE
1367_GLOBAL(mcount)
1368_GLOBAL(_mcount)
	/*
	 * On PPC32, _mcount is required to preserve the link
	 * register. But we have r0 to play with. We push the return
	 * address back to the caller of mcount into the ctr register,
	 * restore the link register, and then jump back using the ctr
	 * register.
	 */
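	/*
	 * For reference, "gcc -pg" on PPC32 typically emits something
	 * like the following in each function prologue (a sketch of
	 * the usual sequence, not taken from this file):
	 *
	 *	mflr	r0
	 *	stw	r0,4(r1)
	 *	bl	_mcount
	 *
	 * so on entry LR holds the address of the mcount call site and
	 * 4(r1) holds the instrumented function's own return address.
	 */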
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */