/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
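/*
 * Two variants are needed because li sign-extends a 16-bit immediate,
 * so it can only materialize constants that fit in the low halfword;
 * a kernel MSR with bits above bit 15 set (such as MSR_CE on
 * 4xx/Book-E) has to be built with a lis/ori pair instead.
 */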

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif
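/*
 * The mcheck/debug/crit variants above exist because those interrupt
 * classes use their own save/restore SPR pairs and may arrive in the
 * middle of a normal exception, so any state a normal handler still
 * needs (SRR0/SRR1 and, on a Book-E MMU, the MAS registers) must be
 * stashed in the exception-level frame before it can be clobbered.
 */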

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
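	/*
	 * The exception prologs (the EXC_XFER macros in head_*.S) reach
	 * this code with LR pointing at two literal words placed just
	 * after their branch: the handler's virtual address and the
	 * address to resume at once the handler is done. The mflr/lwz
	 * sequence above fetches both.
	 */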
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/* Save handler and return address into the 2 unused words
	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
	 * else can be recovered from the pt_regs except r3 which for
	 * normal interrupts has been set to pt_regs and for syscalls
	 * is an argument, so we temporarily use ORIG_GPR3 to save it
	 */
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,ORIG_GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r0,GPR0(r1)
	lwz	r3,ORIG_GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	lwz	r9,8(r1)
	lwz	r11,12(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
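	/*
	 * In rough C terms the dispatch above is (a sketch only; r0 holds
	 * the syscall number, r3-r8 the arguments):
	 *
	 *	if (r0 < NR_syscalls)
	 *		r3 = sys_call_table[r0](r3, r4, r5, r6, r7, r8);
	 *	else
	 *		r3 = -ENOSYS;		// label 66 below
	 *
	 * The slwi scales the syscall number by 4, the size of one
	 * 32-bit function pointer in the table.
	 */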
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
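	/*
	 * Error convention, roughly: a return value that falls within the
	 * last _LAST_ERRNO values of the unsigned 32-bit range is an
	 * error, so the SO bit is set in the saved CR and the value is
	 * negated to hand userspace a positive errno; anything below that
	 * threshold is returned as a plain success value.
	 */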
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we want to
	 * catch it if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
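	/*
	 * The lwarx/stwcx. loop above is an atomic read-modify-write:
	 * it performs ti->flags &= ~_TIF_PERSYSCALL_MASK and retries if
	 * another update sneaked in between the load-reserve and the
	 * store-conditional. The dcbt is the workaround for IBM 405
	 * erratum 77.
	 */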

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

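/*
 * All four wrappers rely on the same convention: bit 0 of the saved
 * _TRAP word means "non-volatile registers not saved in this frame",
 * so after SAVE_NVGPRS it is cleared to advertise that the frame now
 * holds the full register set. Later code (syscall_exit_work above,
 * for instance) tests that bit to decide whether SAVE_NVGPRS is still
 * needed.
 */
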
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
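/*
 * Roughly, the C-level call (from __switch_to() in process.c) looks
 * like this, with prev/next used as illustrative names:
 *
 *	last = _switch(&prev->thread, &next->thread);
 *
 * The task we switched away from is handed back in r3 so the
 * scheduler's "last" bookkeeping works.
 */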
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
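/*
 * Note the lazy state handling above: rather than dumping the FP,
 * AltiVec or SPE register files on every switch, _switch only turns
 * the corresponding MSR bits off, so the first use of those units in
 * the new task raises an "unavailable" exception whose handler
 * reloads the state. Only the cheap VRSAVE/SPEFSCR words are moved
 * eagerly.
 */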

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
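	/*
	 * The stwcx. above deliberately targets the stack to kill any
	 * reservation the interrupted context may still hold, so that an
	 * interrupted lwarx/stwcx. sequence fails and retries rather than
	 * falsely succeeding after we return. On CPUs flagged with
	 * CPU_FTR_NEED_PAIRED_STWCX the stwcx. must be preceded by a
	 * matching lwarx, hence the feature section.
	 */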

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */\
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */
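/*
 * DEAR and ESR are restored by this macro because the critical, debug
 * or machine-check interrupt may have arrived while a normal exception
 * handler still needed them; the rest mirrors the ordinary exception
 * exit above, ending with whichever return-from-interrupt instruction
 * the caller passes in as exc_lvl_rfi.
 */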

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
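/*
 * Thanks to the ## token pasting, RESTORE_xSRR(CSRR0,CSRR1) for
 * example expands to:
 *
 *	lwz	r9,_CSRR0(r1);
 *	lwz	r10,_CSRR1(r1);
 *	mtspr	SPRN_CSRR0,r9;
 *	mtspr	SPRN_CSRR1,r10;
 */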

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

|  | 1127 | /* | 
|  | 1128 | * Load the DBCR0 value for a task that is being ptraced, | 
|  | 1129 | * first saving the current DBCR0 in the global_dbcr0 array. | 
|  | 1130 | * On entry, r0 holds the DBCR0 value to set. | 
|  | 1131 | */ | 
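|  |  | /* | 
|  |  |  * Roughly, in C (a hedged sketch of the routine below; the two-word | 
|  |  |  * per-CPU layout matches the global_dbcr0 block in .bss further | 
|  |  |  * down, and "cpu" stands for the TI_CPU value read in the SMP case): | 
|  |  |  * | 
|  |  |  *	struct { u32 saved_dbcr0; u32 uses; } global_dbcr0[NR_CPUS]; | 
|  |  |  * | 
|  |  |  *	void load_dbcr0(u32 new_dbcr0)		// new value in r0 | 
|  |  |  *	{ | 
|  |  |  *		mtmsr(mfmsr() & ~MSR_DE);	// mask debug exceptions | 
|  |  |  *		isync(); | 
|  |  |  *		global_dbcr0[cpu].saved_dbcr0 = mfspr(SPRN_DBCR0); | 
|  |  |  *		mtspr(SPRN_DBCR0, new_dbcr0); | 
|  |  |  *		global_dbcr0[cpu].uses++; | 
|  |  |  *		mtspr(SPRN_DBSR, -1);		// clear pending events | 
|  |  |  *	} | 
|  |  |  */ | 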
|  | 1132 | load_dbcr0: | 
|  | 1133 | mfmsr	r10		/* first disable debug exceptions */ | 
|  | 1134 | rlwinm	r10,r10,0,~MSR_DE | 
|  | 1135 | mtmsr	r10 | 
|  | 1136 | isync | 
|  | 1137 | mfspr	r10,SPRN_DBCR0 | 
|  | 1138 | lis	r11,global_dbcr0@ha | 
|  | 1139 | addi	r11,r11,global_dbcr0@l | 
| Kumar Gala | 4eaddb4 | 2008-04-09 16:15:40 -0500 | [diff] [blame] | 1140 | #ifdef CONFIG_SMP | 
|  | 1141 | rlwinm	r9,r1,0,0,(31-THREAD_SHIFT) | 
|  | 1142 | lwz	r9,TI_CPU(r9) | 
|  | 1143 | slwi	r9,r9,3 | 
|  | 1144 | add	r11,r11,r9 | 
|  | 1145 | #endif | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1146 | stw	r10,0(r11) | 
|  | 1147 | mtspr	SPRN_DBCR0,r0 | 
|  | 1148 | lwz	r10,4(r11) | 
|  | 1149 | addi	r10,r10,1 | 
|  | 1150 | stw	r10,4(r11) | 
|  | 1151 | li	r11,-1 | 
|  | 1152 | mtspr	SPRN_DBSR,r11	/* clear all pending debug events */ | 
|  | 1153 | blr | 
|  | 1154 |  | 
| Kumar Gala | 991eb43 | 2007-05-14 17:11:58 -0500 | [diff] [blame] | 1155 | .section .bss | 
|  | 1156 | .align	4 | 
|  | 1157 | global_dbcr0: | 
| Kumar Gala | 4eaddb4 | 2008-04-09 16:15:40 -0500 | [diff] [blame] | 1158 | .space	8*NR_CPUS | 
| Kumar Gala | 991eb43 | 2007-05-14 17:11:58 -0500 | [diff] [blame] | 1159 | .previous | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1160 | #endif /* CONFIG_4xx || CONFIG_BOOKE */ | 
|  | 1161 |  | 
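|  |  | /* | 
|  |  |  * The do_work/do_resched/do_user_signal triangle below is, in rough | 
|  |  |  * C outline (a sketch only: hard_irq_enable/disable stand for the | 
|  |  |  * raw MSR_EE flips, and flags is the TI_FLAGS word held in r9): | 
|  |  |  * | 
|  |  |  *	do { | 
|  |  |  *		if (flags & _TIF_NEED_RESCHED) { | 
|  |  |  *			hard_irq_enable(); | 
|  |  |  *			schedule(); | 
|  |  |  *		} else { | 
|  |  |  *			hard_irq_enable(); | 
|  |  |  *			do_signal(regs, flags); | 
|  |  |  *		} | 
|  |  |  *		hard_irq_disable(); | 
|  |  |  *		flags = current_thread_info()->flags; | 
|  |  |  *	} while (flags & (_TIF_NEED_RESCHED | _TIF_USER_WORK_MASK)); | 
|  |  |  *	// then fall through to restore_user | 
|  |  |  */ | 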
|  | 1162 | do_work:			/* r10 contains MSR_KERNEL here */ | 
|  | 1163 | andi.	r0,r9,_TIF_NEED_RESCHED | 
|  | 1164 | beq	do_user_signal | 
|  | 1165 |  | 
|  | 1166 | do_resched:			/* r10 contains MSR_KERNEL here */ | 
| Benjamin Herrenschmidt | 5d38902 | 2009-06-17 17:43:59 +0000 | [diff] [blame] | 1167 | /* Note: We don't need to inform lockdep that we are enabling | 
|  | 1168 | * interrupts here.  As far as it knows, they are already enabled. | 
|  | 1169 | */ | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1170 | ori	r10,r10,MSR_EE | 
|  | 1171 | SYNC | 
|  | 1172 | MTMSRD(r10)		/* hard-enable interrupts */ | 
|  | 1173 | bl	schedule | 
|  | 1174 | recheck: | 
| Benjamin Herrenschmidt | 5d38902 | 2009-06-17 17:43:59 +0000 | [diff] [blame] | 1175 | /* Note: Nor do we tell lockdep that we are disabling them | 
|  | 1176 | * again.  These short disable/enable cycles used to peek at | 
|  | 1177 | * TI_FLAGS aren't advertised to it. | 
|  | 1178 | */ | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1179 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) | 
|  | 1180 | SYNC | 
|  | 1181 | MTMSRD(r10)		/* disable interrupts */ | 
| David Gibson | 6cb7bfe | 2005-10-21 15:45:50 +1000 | [diff] [blame] | 1182 | rlwinm	r9,r1,0,0,(31-THREAD_SHIFT) | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1183 | lwz	r9,TI_FLAGS(r9) | 
|  | 1184 | andi.	r0,r9,_TIF_NEED_RESCHED | 
|  | 1185 | bne-	do_resched | 
| Roland McGrath | 7a10174 | 2008-04-28 17:30:37 +1000 | [diff] [blame] | 1186 | andi.	r0,r9,_TIF_USER_WORK_MASK | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1187 | beq	restore_user | 
|  | 1188 | do_user_signal:			/* r10 contains MSR_KERNEL here */ | 
|  | 1189 | ori	r10,r10,MSR_EE | 
|  | 1190 | SYNC | 
|  | 1191 | MTMSRD(r10)		/* hard-enable interrupts */ | 
|  | 1192 | /* save r13-r31 in the exception frame, if not already done */ | 
| Paul Mackerras | d73e0c9 | 2005-10-28 22:45:25 +1000 | [diff] [blame] | 1193 | lwz	r3,_TRAP(r1) | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1194 | andi.	r0,r3,1 | 
|  | 1195 | beq	2f | 
|  | 1196 | SAVE_NVGPRS(r1) | 
|  | 1197 | rlwinm	r3,r3,0,0,30 | 
| Paul Mackerras | d73e0c9 | 2005-10-28 22:45:25 +1000 | [diff] [blame] | 1198 | stw	r3,_TRAP(r1) | 
| Roland McGrath | 7d6d637 | 2008-07-27 16:52:52 +1000 | [diff] [blame] | 1199 | 2:	addi	r3,r1,STACK_FRAME_OVERHEAD | 
|  | 1200 | mr	r4,r9 | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1201 | bl	do_signal | 
|  | 1202 | REST_NVGPRS(r1) | 
|  | 1203 | b	recheck | 
|  | 1204 |  | 
|  | 1205 | /* | 
|  | 1206 | * We come here at the end of handling an exception that | 
|  | 1207 | * occurred at a point where taking another exception would lose | 
|  | 1208 | * state information, such as the contents of SRR0 and SRR1. | 
|  | 1209 | */ | 
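|  |  | /* | 
|  |  |  * In outline (a sketch of the logic below, with "pc" standing for | 
|  |  |  * the interrupted address held in r12): | 
|  |  |  * | 
|  |  |  *	if (exc_exit_restart <= pc && pc < exc_exit_restart_end) { | 
|  |  |  *		ee_restarts++; | 
|  |  |  *		pc = exc_exit_restart;		// just redo the exit | 
|  |  |  *	} else if (cpu_has_feature(CPU_FTR_601)) { | 
|  |  |  *		return;		// 601 has no RI bit, assume it's OK | 
|  |  |  *	} else { | 
|  |  |  *		nonrecoverable_exception(regs);	// kills the process | 
|  |  |  *	} | 
|  |  |  */ | 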
|  | 1210 | nonrecoverable: | 
|  | 1211 | lis	r10,exc_exit_restart_end@ha | 
|  | 1212 | addi	r10,r10,exc_exit_restart_end@l | 
|  | 1213 | cmplw	r12,r10 | 
|  | 1214 | bge	3f | 
|  | 1215 | lis	r11,exc_exit_restart@ha | 
|  | 1216 | addi	r11,r11,exc_exit_restart@l | 
|  | 1217 | cmplw	r12,r11 | 
|  | 1218 | blt	3f | 
|  | 1219 | lis	r10,ee_restarts@ha | 
|  | 1220 | lwz	r12,ee_restarts@l(r10) | 
|  | 1221 | addi	r12,r12,1 | 
|  | 1222 | stw	r12,ee_restarts@l(r10) | 
|  | 1223 | mr	r12,r11		/* restart at exc_exit_restart */ | 
|  | 1224 | blr | 
|  | 1225 | 3:	/* OK, we can't recover, kill this process */ | 
|  | 1226 | /* but the 601 doesn't implement the RI bit, so assume it's OK */ | 
|  | 1227 | BEGIN_FTR_SECTION | 
|  | 1228 | blr | 
|  | 1229 | END_FTR_SECTION_IFSET(CPU_FTR_601) | 
| Paul Mackerras | d73e0c9 | 2005-10-28 22:45:25 +1000 | [diff] [blame] | 1230 | lwz	r3,_TRAP(r1) | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1231 | andi.	r0,r3,1 | 
|  | 1232 | beq	4f | 
|  | 1233 | SAVE_NVGPRS(r1) | 
|  | 1234 | rlwinm	r3,r3,0,0,30 | 
| Paul Mackerras | d73e0c9 | 2005-10-28 22:45:25 +1000 | [diff] [blame] | 1235 | stw	r3,_TRAP(r1) | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1236 | 4:	addi	r3,r1,STACK_FRAME_OVERHEAD | 
|  | 1237 | bl	nonrecoverable_exception | 
|  | 1238 | /* shouldn't return */ | 
|  | 1239 | b	4b | 
|  | 1240 |  | 
| Kumar Gala | 991eb43 | 2007-05-14 17:11:58 -0500 | [diff] [blame] | 1241 | .section .bss | 
|  | 1242 | .align	2 | 
|  | 1243 | ee_restarts: | 
|  | 1244 | .space	4 | 
|  | 1245 | .previous | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1246 |  | 
|  | 1247 | /* | 
|  | 1248 | * PROM code for specific machines follows.  Put it | 
|  | 1249 | * here so it's easy to add arch-specific sections later. | 
|  | 1250 | * -- Cort | 
|  | 1251 | */ | 
| Paul Mackerras | 033ef33 | 2005-10-26 17:05:24 +1000 | [diff] [blame] | 1252 | #ifdef CONFIG_PPC_RTAS | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1253 | /* | 
|  | 1254 | * On CHRP, the Run-Time Abstraction Services (RTAS) have to be | 
|  | 1255 | * called with the MMU off. | 
|  | 1256 | */ | 
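|  |  | /* | 
|  |  |  * In outline, enter_rtas below: saves LR and the caller's MSR in | 
|  |  |  * the stack frame, translates the return label and stack pointer | 
|  |  |  * to physical addresses, parks the physical stack pointer in | 
|  |  |  * SPRN_SPRG_RTAS, and RFIs to the RTAS entry point with a kernel | 
|  |  |  * MSR whose MSR_IR/MSR_DR bits are cleared.  RTAS returns (blr) to | 
|  |  |  * the physical label 1:, which restores the saved MSR via SRR1 and | 
|  |  |  * RFIs back to the caller with translation re-enabled. | 
|  |  |  */ | 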
|  | 1257 | _GLOBAL(enter_rtas) | 
|  | 1258 | stwu	r1,-INT_FRAME_SIZE(r1) | 
|  | 1259 | mflr	r0 | 
|  | 1260 | stw	r0,INT_FRAME_SIZE+4(r1) | 
| David Gibson | e58c349 | 2006-01-13 14:56:25 +1100 | [diff] [blame] | 1261 | LOAD_REG_ADDR(r4, rtas) | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1262 | lis	r6,1f@ha	/* physical return address for rtas */ | 
|  | 1263 | addi	r6,r6,1f@l | 
|  | 1264 | tophys(r6,r6) | 
|  | 1265 | tophys(r7,r1) | 
| Paul Mackerras | 033ef33 | 2005-10-26 17:05:24 +1000 | [diff] [blame] | 1266 | lwz	r8,RTASENTRY(r4) | 
|  | 1267 | lwz	r4,RTASBASE(r4) | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1268 | mfmsr	r9 | 
|  | 1269 | stw	r9,8(r1) | 
|  | 1270 | LOAD_MSR_KERNEL(r0,MSR_KERNEL) | 
|  | 1271 | SYNC			/* disable interrupts so SRR0/1 */ | 
|  | 1272 | MTMSRD(r0)		/* don't get trashed */ | 
|  | 1273 | li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) | 
|  | 1274 | mtlr	r6 | 
| Benjamin Herrenschmidt | ee43eb7 | 2009-07-14 20:52:54 +0000 | [diff] [blame] | 1275 | mtspr	SPRN_SPRG_RTAS,r7 | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1276 | mtspr	SPRN_SRR0,r8 | 
|  | 1277 | mtspr	SPRN_SRR1,r9 | 
|  | 1278 | RFI | 
|  | 1279 | 1:	tophys(r9,r1) | 
|  | 1280 | lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */ | 
|  | 1281 | lwz	r9,8(r9)	/* original msr value */ | 
|  | 1282 | FIX_SRR1(r9,r0) | 
|  | 1283 | addi	r1,r1,INT_FRAME_SIZE | 
|  | 1284 | li	r0,0 | 
| Benjamin Herrenschmidt | ee43eb7 | 2009-07-14 20:52:54 +0000 | [diff] [blame] | 1285 | mtspr	SPRN_SPRG_RTAS,r0 | 
| Paul Mackerras | 9994a33 | 2005-10-10 22:36:14 +1000 | [diff] [blame] | 1286 | mtspr	SPRN_SRR0,r8 | 
|  | 1287 | mtspr	SPRN_SRR1,r9 | 
|  | 1288 | RFI			/* return to caller */ | 
|  | 1289 |  | 
|  | 1290 | .globl	machine_check_in_rtas | 
|  | 1291 | machine_check_in_rtas: | 
|  | 1292 | twi	31,0,0 | 
|  | 1293 | /* XXX load up BATs and panic */ | 
|  | 1294 |  | 
| Paul Mackerras | 033ef33 | 2005-10-26 17:05:24 +1000 | [diff] [blame] | 1295 | #endif /* CONFIG_PPC_RTAS */ | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1296 |  | 
| Steven Rostedt | 606576c | 2008-10-06 19:06:12 -0400 | [diff] [blame] | 1297 | #ifdef CONFIG_FUNCTION_TRACER | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1298 | #ifdef CONFIG_DYNAMIC_FTRACE | 
|  | 1299 | _GLOBAL(mcount) | 
|  | 1300 | _GLOBAL(_mcount) | 
| Steven Rostedt | c7b0d173 | 2008-11-20 13:18:55 -0800 | [diff] [blame] | 1301 | /* | 
|  | 1302 | * _mcount on PPC32 is required to preserve the link | 
|  | 1303 | * register, but we have r0 to play with.  We use r0 to move | 
|  | 1304 | * the return address (back into the caller of mcount) into | 
|  | 1305 | * the ctr register, restore the link register from the | 
|  | 1306 | * stack, and then jump back via ctr. | 
|  | 1307 | */ | 
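|  |  | /* | 
|  |  |  * Register flow below (a sketch): | 
|  |  |  * | 
|  |  |  *	ctr <- lr	// return address into the traced function | 
|  |  |  *	lr  <- 4(r1)	// the traced function's own saved LR | 
|  |  |  *	bctr		// back into the traced function | 
|  |  |  * | 
|  |  |  * so the default (unpatched) _mcount is little more than a jump | 
|  |  |  * straight back into the traced function. | 
|  |  |  */ | 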
|  | 1308 | mflr	r0 | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1309 | mtctr	r0 | 
| Steven Rostedt | c7b0d173 | 2008-11-20 13:18:55 -0800 | [diff] [blame] | 1310 | lwz	r0, 4(r1) | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1311 | mtlr	r0 | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1312 | bctr | 
|  | 1313 |  | 
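|  |  | /* | 
|  |  |  * ftrace_caller is what call sites are patched to invoke once | 
|  |  |  * tracing is enabled.  The "bl ftrace_stub" at ftrace_call below | 
|  |  |  * is itself live-patched to call the active tracer, and | 
|  |  |  * ftrace_graph_call is patched the same way for the graph tracer. | 
|  |  |  */ | 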
|  | 1314 | _GLOBAL(ftrace_caller) | 
| Steven Rostedt | bf528a3 | 2009-02-11 15:01:18 -0500 | [diff] [blame] | 1315 | MCOUNT_SAVE_FRAME | 
|  | 1316 | /* MCOUNT_SAVE_FRAME leaves the link register value in r3 */ | 
| Abhishek Sagar | 395a59d | 2008-06-21 23:47:27 +0530 | [diff] [blame] | 1317 | subi	r3, r3, MCOUNT_INSN_SIZE | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1318 | .globl ftrace_call | 
|  | 1319 | ftrace_call: | 
|  | 1320 | bl	ftrace_stub | 
|  | 1321 | nop | 
| Steven Rostedt | 60ce8f7 | 2009-02-11 20:06:43 -0500 | [diff] [blame] | 1322 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
|  | 1323 | .globl ftrace_graph_call | 
|  | 1324 | ftrace_graph_call: | 
|  | 1325 | b	ftrace_graph_stub | 
|  | 1326 | _GLOBAL(ftrace_graph_stub) | 
|  | 1327 | #endif | 
| Steven Rostedt | bf528a3 | 2009-02-11 15:01:18 -0500 | [diff] [blame] | 1328 | MCOUNT_RESTORE_FRAME | 
|  | 1329 | /* old link register ends up in ctr reg */ | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1330 | bctr | 
|  | 1331 | #else | 
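|  |  | /* | 
|  |  |  * Without dynamic ftrace, every _mcount call indirects through the | 
|  |  |  * ftrace_trace_function pointer, which points at ftrace_stub (a | 
|  |  |  * plain blr) whenever tracing is disabled. | 
|  |  |  */ | 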
|  | 1332 | _GLOBAL(mcount) | 
|  | 1333 | _GLOBAL(_mcount) | 
| Steven Rostedt | bf528a3 | 2009-02-11 15:01:18 -0500 | [diff] [blame] | 1334 |  | 
|  | 1335 | MCOUNT_SAVE_FRAME | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1336 |  | 
| Abhishek Sagar | 395a59d | 2008-06-21 23:47:27 +0530 | [diff] [blame] | 1337 | subi	r3, r3, MCOUNT_INSN_SIZE | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1338 | LOAD_REG_ADDR(r5, ftrace_trace_function) | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1339 | lwz	r5,0(r5) | 
| Steven Rostedt | ccbfac2 | 2008-05-22 14:31:07 -0400 | [diff] [blame] | 1340 |  | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1341 | mtctr	r5 | 
|  | 1342 | bctrl | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1343 | nop | 
|  | 1344 |  | 
| Steven Rostedt | fad4f47 | 2009-02-11 19:10:57 -0500 | [diff] [blame] | 1345 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
|  | 1346 | b	ftrace_graph_caller | 
|  | 1347 | #endif | 
| Steven Rostedt | bf528a3 | 2009-02-11 15:01:18 -0500 | [diff] [blame] | 1348 | MCOUNT_RESTORE_FRAME | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1349 | bctr | 
|  | 1350 | #endif | 
|  | 1351 |  | 
|  | 1352 | _GLOBAL(ftrace_stub) | 
|  | 1353 | blr | 
|  | 1354 |  | 
| Steven Rostedt | fad4f47 | 2009-02-11 19:10:57 -0500 | [diff] [blame] | 1355 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
|  | 1356 | _GLOBAL(ftrace_graph_caller) | 
|  | 1357 | /* load r4 with the call-site address (the LR value saved by MCOUNT_SAVE_FRAME) */ | 
|  | 1358 | lwz	r4, 44(r1) | 
|  | 1359 | subi	r4, r4, MCOUNT_INSN_SIZE | 
|  | 1360 |  | 
|  | 1361 | /* r3 = address of the parent return-address slot on the stack */ | 
|  | 1362 | addi	r3, r1, 52 | 
|  | 1363 |  | 
|  | 1364 | bl	prepare_ftrace_return | 
|  | 1365 | nop | 
|  | 1366 |  | 
|  | 1367 | MCOUNT_RESTORE_FRAME | 
|  | 1368 | /* old link register ends up in ctr reg */ | 
|  | 1369 | bctr | 
|  | 1370 |  | 
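|  |  | /* | 
|  |  |  * prepare_ftrace_return() rewrote the parent's return-address slot | 
|  |  |  * to point here, so a graph-traced function returns into | 
|  |  |  * return_to_handler; ftrace_return_to_handler() hands back the | 
|  |  |  * original return address, which we then jump to via LR. | 
|  |  |  */ | 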
|  | 1371 | _GLOBAL(return_to_handler) | 
|  | 1372 | /* save the traced function's return values (r3/r4) across the C call */ | 
|  | 1373 | stwu	r1, -32(r1) | 
|  | 1374 | stw	r3, 20(r1) | 
|  | 1375 | stw	r4, 16(r1) | 
|  | 1376 | stw	r31, 12(r1) | 
|  | 1377 | mr	r31, r1 | 
|  | 1378 |  | 
|  | 1379 | bl	ftrace_return_to_handler | 
|  | 1380 | nop | 
|  | 1381 |  | 
|  | 1382 | /* return value has real return address */ | 
|  | 1383 | mtlr	r3 | 
|  | 1384 |  | 
|  | 1385 | lwz	r3, 20(r1) | 
|  | 1386 | lwz	r4, 16(r1) | 
|  | 1387 | lwz	r31,12(r1) | 
|  | 1388 | lwz	r1, 0(r1) | 
|  | 1389 |  | 
|  | 1390 | /* Jump back to real return address */ | 
|  | 1391 | blr | 
|  | 1392 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 
|  | 1393 |  | 
| Steven Rostedt | 4e491d1 | 2008-05-14 23:49:44 -0400 | [diff] [blame] | 1394 | #endif /* CONFIG_FUNCTION_TRACER */ |