/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */
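/*
 * Worked example (illustrative only, assuming a hypothetical WSBITS == 8
 * configuration and the XCHAL_HAVE_NSA path below):
 *   mask = 0x80 (10000000) -> nsau = 24 -> 24 + 8 - 32 + 1 = 1
 *   mask = 0x10 (00010000) -> nsau = 27 -> 27 + 8 - 32 + 1 = 4
 *   mask = 0x01 (00000001) -> nsau = 31 -> 31 + 8 - 32 + 1 = 8 = WSBITS
 */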

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi    \bit, \bit, WSBITS - 32 + 1	# uppermost bit set -> return 1
#else
	movi    \bit, WSBITS
#if WSBITS > 16
	_bltui  \mask, 0x10000, 99f
	addi    \bit, \bit, -16
	extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui  \mask, 0x100, 99f
	addi    \bit, \bit, -8
	srli    \mask, \mask, 8
#endif
99:	_bltui  \mask, 0x10, 99f
	addi    \bit, \bit, -4
	srli    \mask, \mask, 4
99:	_bltui  \mask, 0x4, 99f
	addi    \bit, \bit, -2
	srli    \mask, \mask, 2
99:	_bltui  \mask, 0x2, 99f
	addi    \bit, \bit, -1
99:

#endif
	.endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(user_exception)

	/* Save a2, a3, and depc, restore excsave_1 and set SP. */

	xsr	a3, EXCSAVE_1
	rsr	a0, DEPC
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, SAR
	xsr	a2, ICOUNTLEVEL
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, WINDOWBASE
	rsr	a3, WINDOWSTART
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers
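	/* Worked example of the rotation above (illustrative only, assuming
	 * a hypothetical WSBITS == 8 and WINDOWBASE == 3):
	 *   ws = 01101000 (current-frame bit at position 3)
	 *   rotated right by WINDOWBASE -> a2 = 00001101,
	 *   i.e. the current frame now sits at bit 0.
	 */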

	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
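	/* Worked example of the WMASK encoding (illustrative only, reusing
	 * the 15-bit value from the comment above, i.e. assuming WSBITS == 15):
	 *   rotated ws a2 = 001001000110001
	 *   isolating the next '1' from the right (bit 4), ffs_ws returns 11,
	 *   so 11 frames (bits 14..4) have to be saved.
	 *   WMASK = (11 << 4) | (a2 & 0xf) = (11 << 4) | 0001
	 */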

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, SAR			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, WINDOWSTART		# set corresponding WINDOWSTART bit
	wsr	a2, WINDOWBASE		# and WINDOWSTART
	rsync

	/* We are back to the original stack pointer (a1) */

2:	/* Now, jump to the common exception handler. */

	j	common_exception


/*
 * First-level exit handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

	/* Save a0, a2, a3, DEPC and set SP. */

	xsr	a3, EXCSAVE_1		# restore a3, excsave_1
	rsr	a0, DEPC		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, SAR
	xsr	a2, ICOUNTLEVEL
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, WINDOWBASE		# don't need to save these, we only
	rsr	a3, WINDOWSTART		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

1:

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, DEBUGCAUSE
	rsr	a3, EPC_1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, -1
	rsr	a3, EXCVADDR
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
	xsr	a2, LCOUNT
	s32i	a2, a1, PT_LCOUNT

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a0, EXCCAUSE
	movi	a3, 0
	rsr	a2, EXCSAVE_1
	s32i	a0, a1, PT_EXCCAUSE
	s32i	a3, a2, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid,
	 * so we can allow exceptions and interrupts (*) again.
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 *
	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
	 *     (interrupts disabled) and if this exception is not an interrupt.
	 */

	rsr	a3, PS
	addi	a0, a0, -4
	movi	a2, 1
	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
	movi	a2, 1 << PS_WOE_BIT
	or	a3, a3, a2
	rsr	a0, EXCCAUSE
	xsr	a3, PS

	s32i	a3, a1, PT_PS		# save ps

	/* Save LBEG, LEND */

	rsr	a2, LBEG
	rsr	a3, LEND
	s32i	a2, a1, PT_LBEG
	s32i	a3, a1, PT_LEND

	/* Save optional registers. */

	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	movi	a4, exc_table
	mov	a6, a1			# pass stack frame
	mov	a7, a0			# pass EXCCAUSE
	addx4	a4, a0, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler

	/* Call the second-level handler */

	callx4	a4

	/* Jump here for exception exit */

common_exception_return:

	/* Jump if we are returning from kernel exceptions. */

1:	l32i	a3, a1, PT_PS
	_bbci.l	a3, PS_UM_BIT, 4f

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	GET_THREAD_INFO(a2,a1)
	l32i	a4, a2, TI_FLAGS

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbci.l	a4, TIF_SIGPENDING, 4f

	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f

	/* Call do_signal() */

	movi	a4, do_signal	# int do_signal(struct pt_regs*, sigset_t*)
	mov	a6, a1
	movi	a7, 0
	callx4	a4
	j	1b

3:	/* Reschedule */

	movi	a4, schedule	# void schedule (void)
	callx4	a4
	j	1b

4:	/* Restore optional registers. */

	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	wsr	a3, PS		/* disable interrupts */

	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, DEPC		# use DEPC as temp storage
	wsr	a3, WINDOWSTART		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, WINDOWBASE		# switch to user's saved WB
	rsync
	rsr	a1, DEPC		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land) */

1:	rsr	a0, WINDOWBASE
	rsr	a3, SAR
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */

2:	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

	/*
	 * Note: We've just returned from a call4, so we have
	 * at least 4 addt'l regs.
	 */

	/* Check current_thread_info->preempt_count */

	GET_THREAD_INFO(a2)
	l32i	a3, a2, TI_PREEMPT
	bnez	a3, 1f

	l32i	a2, a2, TI_FLAGS

1:

#endif

#endif

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */
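	/* Illustrative examples for the test above (hypothetical 6-bit
	 * WINDOWSTART values, not taken from a real configuration):
	 *   000100 & 000011 = 0      -> only the current frame is live, the
	 *                               caller frame was spilled, do the movsp
	 *   001100 & 001011 = 001000 -> the caller frame is still live in the
	 *                               register file, nothing to move
	 */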

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr     a3, WINDOWSTART
	addi	a0, a3, -1
	and     a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi    a0, a1, -16
	l32i    a3, a0, 0
	l32i    a4, a0, 4
	s32i    a3, a1, PT_SIZE+0
	s32i    a4, a1, PT_SIZE+4
	l32i    a3, a0, 8
	l32i    a4, a0, 12
	s32i    a3, a1, PT_SIZE+8
	s32i    a4, a1, PT_SIZE+12

	/* Common exception exit.
	 * We restore the special registers and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, EPC_1
	wsr	a3, SAR

	/* Restore LBEG, LEND, LCOUNT */

	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, LBEG
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, LEND
	wsr	a2, LCOUNT

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, ICOUNTLEVEL
	wsr	a3, ICOUNT

	/* Check if it was a double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, DEPC
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

	rsr	a0, EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC_1 and EXCCAUSE */

	wsr	a2, DEPC		# save a2 temporarily
	rsr	a2, EPC + XCHAL_DEBUGLEVEL
	wsr	a2, EPC_1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, EXCCAUSE

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	movi	a0, debug_exception	# restore a3, debug jump vector
	wsr	a2, PS
	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_kernel_exception

2:	rsr	a2, EXCSAVE_1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_user_exception

	/* Debug exception while in exception mode. */
1:	j	1b	// FIXME!!


/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to setup a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a1, (1 << PS_WOE_BIT) | 1
	wsr	a1, PS
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b


/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

	/* We shouldn't be in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

	rsr	a0, DEPC		# get a2
	s32i	a4, a2, PT_AREG4	# save a4 and
	s32i	a0, a2, PT_AREG2	# a2 to stack

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore a3, excsave_1 */

	xsr	a3, EXCSAVE_1		# make sure excsave_1 is valid for dbl.
	rsr	a4, EPC_1		# get exception address
	s32i	a3, a2, PT_AREG3	# save a3 to stack

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	/* Note: l8ui not allowed in IRAM/IROM!! */
	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
#endif
	movi	a3, .Lmovsp_src
	_EXTUI_MOVSP_SRC(a0)		# extract source register number
	addx8	a3, a0, a3
	jx	a3

.Lunhandled_double:
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

	.align 8
.Lmovsp_src:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a3, a5;			_j 1f;	.align 8
	mov	a3, a6;			_j 1f;	.align 8
	mov	a3, a7;			_j 1f;	.align 8
	mov	a3, a8;			_j 1f;	.align 8
	mov	a3, a9;			_j 1f;	.align 8
	mov	a3, a10;		_j 1f;	.align 8
	mov	a3, a11;		_j 1f;	.align 8
	mov	a3, a12;		_j 1f;	.align 8
	mov	a3, a13;		_j 1f;	.align 8
	mov	a3, a14;		_j 1f;	.align 8
	mov	a3, a15;		_j 1f;	.align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
#endif
	addi	a4, a4, 3		# step over movsp
	_EXTUI_MOVSP_DST(a0)		# extract destination register
	wsr	a4, EPC_1		# save new epc_1

	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump

	/* Move the save area. This implies the use of the L32E
	 * and S32E instructions, because this move must be done with
	 * the user's PS.RING privilege levels, not with ring 0
	 * (kernel's) privileges currently active with PS.EXCM
	 * set. Note that we have still registered a fixup routine with the
	 * double exception vector in case a double exception occurs.
	 */

	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

	l32e	a0, a1, -16
	l32e	a4, a1, -12
	s32e	a0, a3, -16
	s32e	a4, a3, -12
	l32e	a0, a1, -8
	l32e	a4, a1, -4
	s32e	a0, a3, -8
	s32e	a4, a3, -4

	/* Restore stack-pointer and all the other saved registers. */

	mov	a1, a3

	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/*  MOVSP <at>,<as>  was invoked with <at> != a1.
	 *  Because the stack pointer is not being modified,
	 *  we should be able to just modify the pointer
	 *  without moving any save area.
	 *  The processor only traps these occurrences if the
	 *  caller window isn't live, so unfortunately we can't
	 *  use this as an alternate trap mechanism.
	 *  So we just do the move.  This requires that we
	 *  resolve the destination register, not just the source,
	 *  so there's some extra work.
	 *  (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
	 */

	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:	movi	a4, .Lmovsp_dst
	addx8	a4, a0, a4
	jx	a4

	.align 8
.Lmovsp_dst:
	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a1, a3;			_j 1f;	.align 8
	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a5, a3;			_j 1f;	.align 8
	mov	a6, a3;			_j 1f;	.align 8
	mov	a7, a3;			_j 1f;	.align 8
	mov	a8, a3;			_j 1f;	.align 8
	mov	a9, a3;			_j 1f;	.align 8
	mov	a10, a3;		_j 1f;	.align 8
	mov	a11, a3;		_j 1f;	.align 8
	mov	a12, a3;		_j 1f;	.align 8
	mov	a13, a3;		_j 1f;	.align 8
	mov	a14, a3;		_j 1f;	.align 8
	mov	a15, a3;		_j 1f;	.align 8

1:	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe


/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	kernel_exception

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	user_exception

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, DEPC		# restore a2, depc
	rsr	a3, EXCSAVE_1

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0


/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	dispatch table, original in excsave_1
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *       statements and continues from there
 *
 * Usage TRY	l32i	a0, a1, 0
 *		<other code>
 *	 done:	rfe
 *	 CATCH	<set return code>
 *		j done
 */

#define TRY								\
	.section __ex_table, "a";					\
	.word	66f, 67f;						\
	.text;								\
66:

#define CATCH								\
67:

ENTRY(fast_syscall_xtensa)

	xsr	a3, EXCSAVE_1		# restore a3, excsave1

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a0, a3, 0		# read current value
	bne	a0, a4, 1f		# not equal to old value? jump
TRY	s32i	a5, a3, 0		# equal: store the new value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

TRY	l32i	a7, a3, 0		# orig
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

CATCH
.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe



/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, WINDOWBASE
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, SAR
	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
	s32i	a3, a2, PT_AREG3
	s32i	a4, a2, PT_AREG4
	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG7
	s32i	a11, a2, PT_AREG11
	s32i	a15, a2, PT_AREG15

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, SAR
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG7
	l32i	a11, a2, PT_AREG11
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, WINDOWBASE	# get current windowbase (a2 is saved)
	xsr	a0, DEPC	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, EXCSAVE_1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, WINDOWSTART	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in excsave_1)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	a3
	 */

	wsr	a3, WINDOWBASE
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, EXCCAUSE
	addx4	a0, a0, a3			# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0

fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, DEPC		# exception address

	/* Restore fixup handler. */

	xsr	a3, EXCSAVE_1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, WINDOWBASE
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, SAR			# WB is still in SAR
	neg	a3, a3
	wsr	a3, WINDOWBASE
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, EXCSAVE_1

	rfde


/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses a3, a4 and SAR.
 *  - the last 'valid' register of each frame is clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */

ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a3 = yyxxxwww1.
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1284 | 	 */ | 
 | 1285 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1286 | 	rsr	a4, WINDOWBASE | 
| Chris Zankel | ea0b6b0 | 2008-01-09 09:22:36 -0800 | [diff] [blame] | 1287 | 	rsr	a3, WINDOWSTART		# a3 = xxxwww1yy | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1288 | 	ssr	a4			# holds WB | 
 | 1289 | 	slli	a4, a3, WSBITS | 
 | 1290 | 	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy | 
| Chris Zankel | ea0b6b0 | 2008-01-09 09:22:36 -0800 | [diff] [blame] | 1291 | 	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1292 |  | 
 | 1293 | 	/* We are done if there are no more than the current register frame. */ | 
 | 1294 |  | 
| Chris Zankel | 50c0716 | 2007-11-14 13:47:02 -0800 | [diff] [blame] | 1295 | 	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1296 | 	movi	a4, (1 << (WSBITS-1)) | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1297 | 	_beqz	a3, .Lnospill		# only one active frame? jump | 
 | 1298 |  | 
 | 1299 | 	/* We want 1 at the top, so that we return to the current windowbase */ | 
 | 1300 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1301 | 	or	a3, a3, a4		# 1yyxxxwww | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1302 |  | 
 | 1303 | 	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ | 
 | 1304 |  | 
 | 1305 | 	wsr	a3, WINDOWSTART		# save shifted windowstart | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1306 | 	neg	a4, a3 | 
 | 1307 | 	and	a3, a4, a3		# first bit set from right: 000010000 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1308 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1309 | 	ffs_ws	a4, a3			# a4: shifts to skip empty frames | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1310 | 	movi	a3, WSBITS | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1311 | 	sub	a4, a3, a4		# a4 = WSBITS - a4 = number of 0-bits from right | 
 | 1312 | 	ssr	a4			# save in SAR for later. | 
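
	/* The neg/and/ffs_ws/sub sequence above simply finds the index of the
	 * lowest set bit; in C terms (a sketch, with 'mask' being the shifted
	 * WINDOWSTART whose top bit was forced on, so it is never zero):
	 *
	 *	unsigned int skip = __builtin_ctz(mask);
	 *
	 * 'skip' is the number of empty frames between the current frame and
	 * the oldest live one; it stays in SAR and is added to WINDOWBASE below.
	 */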
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1313 |  | 
 | 1314 | 	rsr	a3, WINDOWBASE | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1315 | 	add	a3, a3, a4 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1316 | 	wsr	a3, WINDOWBASE | 
 | 1317 | 	rsync | 
 | 1318 |  | 
 | 1319 | 	rsr	a3, WINDOWSTART | 
 | 1320 | 	srl	a3, a3			# shift windowstart | 
 | 1321 |  | 
 | 1322 | 	/* WB is now just one frame below the oldest frame in the register | 
 | 1323 | 	   window. WS is shifted so the oldest frame is in bit 0; thus, WB | 
 | 1324 | 	   and WS differ by one 4-register frame. */ | 
 | 1325 |  | 
 | 1326 | 	/* Save frames. Depending on which call was used (call4, call8, or call12), | 
 | 1327 | 	 * we have to save 4, 8, or 12 registers. | 
 | 1328 | 	 */ | 
 | 1329 |  | 
 | 1330 | 	_bbsi.l	a3, 1, .Lc4 | 
 | 1331 | 	_bbsi.l	a3, 2, .Lc8 | 
 | 1332 |  | 
 | 1333 | 	/* Special case: we have a call12-frame starting at a4. */ | 
 | 1334 |  | 
 | 1335 | 	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first) | 
 | 1336 |  | 
 | 1337 | 	s32e	a4, a1, -16	# a1 is valid with an empty spill area | 
 | 1338 | 	l32e	a4, a5, -12 | 
 | 1339 | 	s32e	a8, a4, -48 | 
 | 1340 | 	mov	a8, a4 | 
 | 1341 | 	l32e	a4, a1, -16 | 
 | 1342 | 	j	.Lc12c | 
 | 1343 |  | 
| Chris Zankel | 50c0716 | 2007-11-14 13:47:02 -0800 | [diff] [blame] | 1344 | .Lnospill: | 
| Chris Zankel | ea0b6b0 | 2008-01-09 09:22:36 -0800 | [diff] [blame] | 1345 | 	ret | 
| Chris Zankel | 50c0716 | 2007-11-14 13:47:02 -0800 | [diff] [blame] | 1346 |  | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1347 | .Lloop: _bbsi.l	a3, 1, .Lc4 | 
 | 1348 | 	_bbci.l	a3, 2, .Lc12 | 
 | 1349 |  | 
 | 1350 | .Lc8:	s32e	a4, a13, -16 | 
 | 1351 | 	l32e	a4, a5, -12 | 
 | 1352 | 	s32e	a8, a4, -32 | 
 | 1353 | 	s32e	a5, a13, -12 | 
 | 1354 | 	s32e	a6, a13, -8 | 
 | 1355 | 	s32e	a7, a13, -4 | 
 | 1356 | 	s32e	a9, a4, -28 | 
 | 1357 | 	s32e	a10, a4, -24 | 
 | 1358 | 	s32e	a11, a4, -20 | 
 | 1359 |  | 
 | 1360 | 	srli	a11, a3, 2		# shift windowstart by 2 | 
 | 1361 | 	rotw	2 | 
 | 1362 | 	_bnei	a3, 1, .Lloop | 
 | 1363 |  | 
 | 1364 | .Lexit: /* Done. Do the final rotation, set WS, and return. */ | 
 | 1365 |  | 
 | 1366 | 	rotw	1 | 
 | 1367 | 	rsr	a3, WINDOWBASE | 
 | 1368 | 	ssl	a3 | 
 | 1369 | 	movi	a3, 1 | 
 | 1370 | 	sll	a3, a3 | 
 | 1371 | 	wsr	a3, WINDOWSTART | 
| Chris Zankel | ea0b6b0 | 2008-01-09 09:22:36 -0800 | [diff] [blame] | 1372 | 	ret | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1373 |  | 
 | 1374 | .Lc4:	s32e	a4, a9, -16 | 
 | 1375 | 	s32e	a5, a9, -12 | 
 | 1376 | 	s32e	a6, a9, -8 | 
 | 1377 | 	s32e	a7, a9, -4 | 
 | 1378 |  | 
 | 1379 | 	srli	a7, a3, 1 | 
 | 1380 | 	rotw	1 | 
 | 1381 | 	_bnei	a3, 1, .Lloop | 
 | 1382 | 	j	.Lexit | 
 | 1383 |  | 
 | 1384 | .Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero! | 
 | 1385 |  | 
 | 1386 | 	/* 12-register frame (call12) */ | 
 | 1387 |  | 
 | 1388 | 	l32e	a2, a5, -12 | 
 | 1389 | 	s32e	a8, a2, -48 | 
 | 1390 | 	mov	a8, a2 | 
 | 1391 |  | 
 | 1392 | .Lc12c: s32e	a9, a8, -44 | 
 | 1393 | 	s32e	a10, a8, -40 | 
 | 1394 | 	s32e	a11, a8, -36 | 
 | 1395 | 	s32e	a12, a8, -32 | 
 | 1396 | 	s32e	a13, a8, -28 | 
 | 1397 | 	s32e	a14, a8, -24 | 
 | 1398 | 	s32e	a15, a8, -20 | 
 | 1399 | 	srli	a15, a3, 3 | 
 | 1400 |  | 
 | 1401 | 	/* The stack pointer for a4..a7 is out of reach, so we rotate the | 
 | 1402 | 	 * window, grab the stackpointer, and rotate back. | 
 | 1403 | 	 * Alternatively, we could also use the following approach, but that | 
 | 1404 | 	 * makes the fixup routine much more complicated: | 
 | 1405 | 	 * rotw	1 | 
 | 1406 | 	 * s32e	a0, a13, -16 | 
 | 1407 | 	 * ... | 
 | 1408 | 	 * rotw 2 | 
 | 1409 | 	 */ | 
 | 1410 |  | 
 | 1411 | 	rotw	1 | 
 | 1412 | 	mov	a5, a13 | 
 | 1413 | 	rotw	-1 | 
 | 1414 |  | 
 | 1415 | 	s32e	a4, a9, -16 | 
 | 1416 | 	s32e	a5, a9, -12 | 
 | 1417 | 	s32e	a6, a9, -8 | 
 | 1418 | 	s32e	a7, a9, -4 | 
 | 1419 |  | 
 | 1420 | 	rotw	3 | 
 | 1421 |  | 
 | 1422 | 	_beqi	a3, 1, .Lexit | 
 | 1423 | 	j	.Lloop | 
 | 1424 |  | 
 | 1425 | .Linvalid_mask: | 
 | 1426 |  | 
 | 1427 | 	/* We get here because of an unrecoverable error in the window | 
 | 1428 | 	 * registers. If we are in user space, we kill the application; | 
 | 1429 | 	 * in kernel space, however, this condition is unrecoverable. | 
 | 1430 | 	 */ | 
 | 1431 |  | 
 | 1432 | 	rsr	a0, PS | 
| Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 1433 | 	_bbci.l	a0, PS_UM_BIT, 1f | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1434 |  | 
 | 1435 |  	/* User space: Set up a dummy frame and kill the application. | 
 | 1436 | 	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. | 
 | 1437 | 	 */ | 
 | 1438 |  | 
 | 1439 | 	movi	a0, 1 | 
 | 1440 | 	movi	a1, 0 | 
 | 1441 |  | 
 | 1442 | 	wsr	a0, WINDOWSTART | 
 | 1443 | 	wsr	a1, WINDOWBASE | 
 | 1444 | 	rsync | 
 | 1445 |  | 
 | 1446 | 	movi	a0, 0 | 
 | 1447 |  | 
 | 1448 | 	movi	a3, exc_table | 
 | 1449 | 	l32i	a1, a3, EXC_TABLE_KSTK | 
 | 1450 | 	wsr	a3, EXCSAVE_1 | 
 | 1451 |  | 
| Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 1452 | 	movi	a4, (1 << PS_WOE_BIT) | 1 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1453 | 	wsr	a4, PS | 
 | 1454 | 	rsync | 
 | 1455 |  | 
 | 1456 | 	movi	a6, SIGSEGV | 
 | 1457 | 	movi	a4, do_exit | 
 | 1458 | 	callx4	a4 | 
 | 1459 |  | 
 | 1460 | 1:	/* Kernel space: PANIC! */ | 
 | 1461 |  | 
 | 1462 | 	wsr	a0, EXCSAVE_1 | 
 | 1463 | 	movi	a0, unrecoverable_exception | 
 | 1464 | 	callx0	a0		# should not return | 
 | 1465 | 1:	j	1b | 
 | 1466 |  | 
| Johannes Weiner | e5083a6 | 2009-03-04 16:21:31 +0100 | [diff] [blame] | 1467 | #ifdef CONFIG_MMU | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1468 | /* | 
 | 1469 |  * We should never get here. Bail out! | 
 | 1470 |  */ | 
 | 1471 |  | 
 | 1472 | ENTRY(fast_second_level_miss_double_kernel) | 
 | 1473 |  | 
 | 1474 | 1:	movi	a0, unrecoverable_exception | 
 | 1475 | 	callx0	a0		# should not return | 
 | 1476 | 1:	j	1b | 
 | 1477 |  | 
 | 1478 | /* First-level entry handler for user, kernel, and double 2nd-level | 
 | 1479 |  * TLB miss exceptions.  Note that for now, user and kernel miss | 
 | 1480 |  * exceptions share the same entry point and are handled identically. | 
 | 1481 |  * | 
 | 1482 |  * An old, less-efficient C version of this function used to exist. | 
 | 1483 |  * We include it below, interleaved as comments, for reference. | 
 | 1484 |  * | 
 | 1485 |  * Entry condition: | 
 | 1486 |  * | 
 | 1487 |  *   a0:	trashed, original value saved on stack (PT_AREG0) | 
 | 1488 |  *   a1:	a1 | 
 | 1489 |  *   a2:	new stack pointer, original in DEPC | 
 | 1490 |  *   a3:	dispatch table | 
 | 1491 |  *   depc:	a2, original value saved on stack (PT_DEPC) | 
 | 1492 |  *   excsave_1:	a3 | 
 | 1493 |  * | 
 | 1494 |  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | 
 | 1495 |  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | 
 | 1496 |  */ | 
 | 1497 |  | 
 | 1498 | ENTRY(fast_second_level_miss) | 
 | 1499 |  | 
 | 1500 | 	/* Save a1. Note: we don't expect a double exception. */ | 
 | 1501 |  | 
 | 1502 | 	s32i	a1, a2, PT_AREG1 | 
 | 1503 |  | 
 | 1504 | 	/* We need to map the page of PTEs for the user task.  Find | 
 | 1505 | 	 * the pointer to that page.  Also, it's possible for tsk->mm | 
 | 1506 | 	 * to be NULL while tsk->active_mm is nonzero if we faulted on | 
 | 1507 | 	 * a vmalloc address.  In that rare case, we must use | 
 | 1508 | 	 * active_mm instead to avoid a fault in this handler.  See | 
 | 1509 | 	 * | 
 | 1510 | 	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html | 
 | 1511 | 	 *   (or search Internet on "mm vs. active_mm") | 
 | 1512 | 	 * | 
 | 1513 | 	 *	if (!mm) | 
 | 1514 | 	 *		mm = tsk->active_mm; | 
 | 1515 | 	 *	pgd = pgd_offset (mm, regs->excvaddr); | 
 | 1516 | 	 *	pmd = pmd_offset (pgd, regs->excvaddr); | 
 | 1517 | 	 *	pmdval = *pmd; | 
 | 1518 | 	 */ | 
 | 1519 |  | 
 | 1520 | 	GET_CURRENT(a1,a2) | 
 | 1521 | 	l32i	a0, a1, TASK_MM		# tsk->mm | 
 | 1522 | 	beqz	a0, 9f | 
 | 1523 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1524 |  | 
 | 1525 | 	/* We deliberately destroy a3 that holds the exception table. */ | 
 | 1526 |  | 
 | 1527 | 8:	rsr	a3, EXCVADDR		# fault address | 
 | 1528 | 	_PGD_OFFSET(a0, a3, a1) | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1529 | 	l32i	a0, a0, 0		# read pmdval | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1530 | 	beqz	a0, 2f | 
 | 1531 |  | 
 | 1532 | 	/* Read ptevaddr and convert to top of page-table page. | 
 | 1533 | 	 * | 
 | 1534 | 	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK; | 
 | 1535 | 	 * 	vpnval += DTLB_WAY_PGTABLE; | 
 | 1536 | 	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL); | 
 | 1537 | 	 *	write_dtlb_entry (pteval, vpnval); | 
 | 1538 | 	 * | 
 | 1539 | 	 * The messy computation for 'pteval' above really simplifies | 
 | 1540 | 	 * into the following: | 
 | 1541 | 	 * | 
| Chris Zankel | 6656920 | 2007-08-22 10:14:51 -0700 | [diff] [blame] | 1542 | 	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | _PAGE_DIRECTORY | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1543 | 	 */ | 
 | 1544 |  | 
 | 1545 | 	movi	a1, -PAGE_OFFSET | 
 | 1546 | 	add	a0, a0, a1		# pmdval - PAGE_OFFSET | 
 | 1547 | 	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK | 
 | 1548 | 	xor	a0, a0, a1 | 
 | 1549 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1550 | 	movi	a1, _PAGE_DIRECTORY | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1551 | 	or	a0, a0, a1		# ... | PAGE_DIRECTORY | 
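
	/* The extui/xor pair above applies PAGE_MASK without loading the mask
	 * constant; as a rough C sketch:
	 *
	 *	low = x & (PAGE_SIZE - 1);	// extui: extract the low PAGE_SHIFT bits
	 *	x   = x ^ low;			// xor: clear them, i.e. x &= PAGE_MASK
	 *
	 * so the whole sequence computes
	 *
	 *	pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | _PAGE_DIRECTORY;
	 */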
 | 1552 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1553 | 	/* | 
| Chris Zankel | 6656920 | 2007-08-22 10:14:51 -0700 | [diff] [blame] | 1554 | 	 * We utilize all three wired-ways (7-9) to hold pmd translations. | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1555 | 	 * Memory regions are mapped to the DTLBs according to bits 28 and 29. | 
 | 1556 | 	 * This allows us to map the three most common regions to three different | 
 | 1557 | 	 * DTLBs: | 
 | 1558 | 	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000) | 
 | 1559 | 	 *  2   -> way 8	shared libraries (2000.0000) | 
 | 1560 | 	 *  3   -> way 9	stack (3000.0000) | 
 | 1561 | 	 */ | 
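
	/* In C terms the way selection below amounts to (a sketch; 'vaddr' is
	 * the faulting address, DTLB_WAY_PGD the first of the wired ways):
	 *
	 *	region = (vaddr >> 28) & 3;			// 0..3
	 *	way    = DTLB_WAY_PGD + ((3 * region) >> 2);	// 0,1 -> +0, 2 -> +1, 3 -> +2
	 *	entry  = (ptevaddr & PAGE_MASK) + way;
	 *	write_dtlb_entry(pteval, entry);
	 */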
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1562 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1563 | 	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3 | 
 | 1564 | 	rsr	a1, PTEVADDR | 
 | 1565 | 	addx2	a3, a3, a3		# ->			0,3,6,9 | 
 | 1566 | 	srli	a1, a1, PAGE_SHIFT | 
 | 1567 | 	extui	a3, a3, 2, 2		# ->			0,0,1,2 | 
 | 1568 | 	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK | 
 | 1569 | 	addi	a3, a3, DTLB_WAY_PGD | 
 | 1570 | 	add	a1, a1, a3		# ... + way_number | 
 | 1571 |  | 
 | 1572 | 3:	wdtlb	a0, a1 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1573 | 	dsync | 
 | 1574 |  | 
 | 1575 | 	/* Exit critical section. */ | 
 | 1576 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1577 | 4:	movi	a3, exc_table		# restore a3 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1578 | 	movi	a0, 0 | 
 | 1579 | 	s32i	a0, a3, EXC_TABLE_FIXUP | 
 | 1580 |  | 
 | 1581 | 	/* Restore the working registers, and return. */ | 
 | 1582 |  | 
 | 1583 | 	l32i	a0, a2, PT_AREG0 | 
 | 1584 | 	l32i	a1, a2, PT_AREG1 | 
 | 1585 | 	l32i	a2, a2, PT_DEPC | 
 | 1586 | 	xsr	a3, EXCSAVE_1 | 
 | 1587 |  | 
 | 1588 | 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f | 
 | 1589 |  | 
 | 1590 | 	/* Regular exception: restore a2 from DEPC and return. */ | 
 | 1591 |  | 
 | 1592 | 	rsr	a2, DEPC | 
 | 1593 | 	rfe | 
 | 1594 |  | 
 | 1595 | 	/* Return from double exception. */ | 
 | 1596 |  | 
 | 1597 | 1:	xsr	a2, DEPC | 
 | 1598 | 	esync | 
 | 1599 | 	rfde | 
 | 1600 |  | 
 | 1601 | 9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0 | 
 | 1602 | 	j	8b | 
 | 1603 |  | 
| Chris Zankel | 6656920 | 2007-08-22 10:14:51 -0700 | [diff] [blame] | 1604 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | 
 | 1605 |  | 
 | 1606 | 2:	/* Special case for cache aliasing. | 
 | 1607 | 	 * We (should) only get here if a clear_user_page, copy_user_page, | 
 | 1608 | 	 * or one of the aliased cache flush functions was preempted by | 
 | 1609 | 	 * another task. Re-establish the temporary mapping to the | 
 | 1610 | 	 * TLBTEMP_BASE areas. | 
 | 1611 | 	 */ | 
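
	/* The checks below correspond roughly to the following C (a sketch;
	 * 'epc' is EPC_1 and 'vaddr' is EXCVADDR):
	 *
	 *	if (pt_depc >= VALID_DOUBLE_EXCEPTION_ADDRESS)	// double exception
	 *		goto default_handling;
	 *	if (epc < __tlbtemp_mapping_start || epc >= __tlbtemp_mapping_end)
	 *		goto default_handling;
	 *	if (vaddr < TLBTEMP_BASE_1 ||
	 *	    vaddr >= TLBTEMP_BASE_1 + (2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)))
	 *		goto default_handling;
	 *	// otherwise re-insert the temporary ITLB or DTLB entry (see below)
	 */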
 | 1612 |  | 
 | 1613 | 	/* We shouldn't be in a double exception */ | 
 | 1614 |  | 
 | 1615 | 	l32i	a0, a2, PT_DEPC | 
 | 1616 | 	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f | 
 | 1617 |  | 
 | 1618 | 	/* Make sure the exception originated in the special functions */ | 
 | 1619 |  | 
 | 1620 | 	movi	a0, __tlbtemp_mapping_start | 
 | 1621 | 	rsr	a3, EPC_1 | 
 | 1622 | 	bltu	a3, a0, 2f | 
 | 1623 | 	movi	a0, __tlbtemp_mapping_end | 
 | 1624 | 	bgeu	a3, a0, 2f | 
 | 1625 |  | 
 | 1626 | 	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ | 
 | 1627 |  | 
 | 1628 | 	movi	a3, TLBTEMP_BASE_1 | 
 | 1629 | 	rsr	a0, EXCVADDR | 
 | 1630 | 	bltu	a0, a3, 2f | 
 | 1631 |  | 
 | 1632 | 	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) | 
 | 1633 | 	bgeu	a1, a3, 2f | 
 | 1634 |  | 
 | 1635 | 	/* Check if we have to restore an ITLB mapping. */ | 
 | 1636 |  | 
 | 1637 | 	movi	a1, __tlbtemp_mapping_itlb | 
 | 1638 | 	rsr	a3, EPC_1 | 
 | 1639 | 	sub	a3, a3, a1 | 
 | 1640 |  | 
 | 1641 | 	/* Calculate VPN */ | 
 | 1642 |  | 
 | 1643 | 	movi	a1, PAGE_MASK | 
 | 1644 | 	and	a1, a1, a0 | 
 | 1645 |  | 
 | 1646 | 	/* Jump for ITLB entry */ | 
 | 1647 |  | 
 | 1648 | 	bgez	a3, 1f | 
 | 1649 |  | 
 | 1650 | 	/* We can use up to two TLBTEMP areas, one for src and one for dst. */ | 
 | 1651 |  | 
 | 1652 | 	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1 | 
 | 1653 | 	add	a1, a3, a1 | 
 | 1654 |  | 
 | 1655 | 	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */ | 
 | 1656 |  | 
 | 1657 | 	mov	a0, a6 | 
 | 1658 | 	movnez	a0, a7, a3 | 
 | 1659 | 	j	3b | 
 | 1660 |  | 
 | 1661 | 	/* ITLB entry. We only use dst in a6. */ | 
 | 1662 |  | 
 | 1663 | 1:	witlb	a6, a1 | 
 | 1664 | 	isync | 
 | 1665 | 	j	4b | 
 | 1666 |  | 
 | 1667 |  | 
 | 1668 | #endif	// DCACHE_WAY_SIZE > PAGE_SIZE | 
 | 1669 |  | 
 | 1670 |  | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1671 | 2:	/* Invalid PGD, default exception handling */ | 
 | 1672 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1673 | 	movi	a3, exc_table | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1674 | 	rsr	a1, DEPC | 
 | 1675 | 	xsr	a3, EXCSAVE_1 | 
 | 1676 | 	s32i	a1, a2, PT_AREG2 | 
 | 1677 | 	s32i	a3, a2, PT_AREG3 | 
 | 1678 | 	mov	a1, a2 | 
 | 1679 |  | 
 | 1680 | 	rsr	a2, PS | 
| Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 1681 | 	bbsi.l	a2, PS_UM_BIT, 1f | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1682 | 	j	_kernel_exception | 
 | 1683 | 1:	j	_user_exception | 
 | 1684 |  | 
 | 1685 |  | 
 | 1686 | /* | 
 | 1687 |  * StoreProhibitedException | 
 | 1688 |  * | 
 | 1689 |  * Update the pte and rewrite the dtlb mapping for this pte. | 
 | 1690 |  * | 
 | 1691 |  * Entry condition: | 
 | 1692 |  * | 
 | 1693 |  *   a0:	trashed, original value saved on stack (PT_AREG0) | 
 | 1694 |  *   a1:	a1 | 
 | 1695 |  *   a2:	new stack pointer, original in DEPC | 
 | 1696 |  *   a3:	dispatch table | 
 | 1697 |  *   depc:	a2, original value saved on stack (PT_DEPC) | 
 | 1698 |  *   excsave_1:	a3 | 
 | 1699 |  * | 
 | 1700 |  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | 
 | 1701 |  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | 
 | 1702 |  */ | 
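
/* Roughly, the fast path below performs the following (a C sketch with
 * illustrative helper names, not the actual slow-path code in fault.c;
 * 'vaddr' is EXCVADDR):
 *
 *	pte_t *ptep = pte_of(mm ? mm : active_mm, vaddr);	// pgd/pmd/pte walk
 *	pte_t pte = *ptep;
 *
 *	if (pte_val(pte) & (1 << _PAGE_WRITABLE_BIT)) {
 *		pte_val(pte) |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *		*ptep = pte;
 *		// write back the cache line if aliasing, then rewrite the dtlb entry
 *	} else {
 *		// not a simple dirty/accessed update: take the regular fault path
 *	}
 */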
 | 1703 |  | 
 | 1704 | ENTRY(fast_store_prohibited) | 
 | 1705 |  | 
 | 1706 | 	/* Save a1 and a4. */ | 
 | 1707 |  | 
 | 1708 | 	s32i	a1, a2, PT_AREG1 | 
 | 1709 | 	s32i	a4, a2, PT_AREG4 | 
 | 1710 |  | 
 | 1711 | 	GET_CURRENT(a1,a2) | 
 | 1712 | 	l32i	a0, a1, TASK_MM		# tsk->mm | 
 | 1713 | 	beqz	a0, 9f | 
 | 1714 |  | 
 | 1715 | 8:	rsr	a1, EXCVADDR		# fault address | 
 | 1716 | 	_PGD_OFFSET(a0, a1, a4) | 
 | 1717 | 	l32i	a0, a0, 0 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1718 | 	beqz	a0, 2f | 
 | 1719 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1720 | 	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/ | 
 | 1721 |  | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1722 | 	_PTE_OFFSET(a0, a1, a4) | 
 | 1723 | 	l32i	a4, a0, 0		# read pteval | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1724 | 	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1725 |  | 
| Chris Zankel | 01858d1 | 2007-08-06 23:57:57 -0700 | [diff] [blame] | 1726 | 	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1727 | 	or	a4, a4, a1 | 
 | 1728 | 	rsr	a1, EXCVADDR | 
 | 1729 | 	s32i	a4, a0, 0 | 
 | 1730 |  | 
 | 1731 | 	/* We need to flush the cache if we have page coloring. */ | 
 | 1732 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | 
 | 1733 | 	dhwb	a0, 0 | 
 | 1734 | #endif | 
 | 1735 | 	pdtlb	a0, a1 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1736 | 	wdtlb	a4, a0 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1737 |  | 
 | 1738 | 	/* Exit critical section. */ | 
 | 1739 |  | 
 | 1740 | 	movi	a0, 0 | 
 | 1741 | 	s32i	a0, a3, EXC_TABLE_FIXUP | 
 | 1742 |  | 
 | 1743 | 	/* Restore the working registers, and return. */ | 
 | 1744 |  | 
 | 1745 | 	l32i	a4, a2, PT_AREG4 | 
 | 1746 | 	l32i	a1, a2, PT_AREG1 | 
 | 1747 | 	l32i	a0, a2, PT_AREG0 | 
 | 1748 | 	l32i	a2, a2, PT_DEPC | 
 | 1749 |  | 
 | 1750 | 	/* Restore excsave1 and a3. */ | 
 | 1751 |  | 
 | 1752 | 	xsr	a3, EXCSAVE_1 | 
 | 1753 | 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f | 
 | 1754 |  | 
 | 1755 | 	rsr	a2, DEPC | 
 | 1756 | 	rfe | 
 | 1757 |  | 
 | 1758 | 	/* Double exception. Restore FIXUP handler and return. */ | 
 | 1759 |  | 
 | 1760 | 1:	xsr	a2, DEPC | 
 | 1761 | 	esync | 
 | 1762 | 	rfde | 
 | 1763 |  | 
 | 1764 | 9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0 | 
 | 1765 | 	j	8b | 
 | 1766 |  | 
 | 1767 | 2:	/* If there was a problem, handle fault in C */ | 
 | 1768 |  | 
 | 1769 | 	rsr	a4, DEPC	# still holds a2 | 
 | 1770 | 	xsr	a3, EXCSAVE_1 | 
 | 1771 | 	s32i	a4, a2, PT_AREG2 | 
 | 1772 | 	s32i	a3, a2, PT_AREG3 | 
 | 1773 | 	l32i	a4, a2, PT_AREG4 | 
 | 1774 | 	mov	a1, a2 | 
 | 1775 |  | 
 | 1776 | 	rsr	a2, PS | 
| Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 1777 | 	bbsi.l	a2, PS_UM_BIT, 1f | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1778 | 	j	_kernel_exception | 
 | 1779 | 1:	j	_user_exception | 
| Johannes Weiner | e5083a6 | 2009-03-04 16:21:31 +0100 | [diff] [blame] | 1780 | #endif /* CONFIG_MMU */ | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1781 |  | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1782 | /* | 
| Chris Zankel | fc4fb2a | 2006-12-10 02:18:52 -0800 | [diff] [blame] | 1783 |  * System Calls. | 
 | 1784 |  * | 
 | 1785 |  * void system_call (struct pt_regs* regs, int exccause) | 
 | 1786 |  *                            a2                 a3 | 
 | 1787 |  */ | 
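
/* In rough C terms the dispatch below is (a sketch; note the extra pt_regs
 * pointer passed to the handler on the stack):
 *
 *	regs->syscall = regs->areg[2];
 *	do_syscall_trace_enter(regs);
 *
 *	nr = regs->syscall;
 *	if (nr < __NR_syscall_count && sys_call_table[nr] != sys_ni_syscall)
 *		res = sys_call_table[nr](regs->areg[6], regs->areg[3],
 *					 regs->areg[4], regs->areg[5],
 *					 regs->areg[8], regs->areg[9]);
 *	else
 *		res = -ENOSYS;
 *
 *	regs->areg[2] = res;
 *	do_syscall_trace_leave(regs);
 */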
 | 1788 |  | 
 | 1789 | ENTRY(system_call) | 
 | 1790 | 	entry	a1, 32 | 
 | 1791 |  | 
 | 1792 | 	/* regs->syscall = regs->areg[2] */ | 
 | 1793 |  | 
 | 1794 | 	l32i	a3, a2, PT_AREG2 | 
 | 1795 | 	mov	a6, a2 | 
 | 1796 | 	movi	a4, do_syscall_trace_enter | 
 | 1797 | 	s32i	a3, a2, PT_SYSCALL | 
 | 1798 | 	callx4	a4 | 
 | 1799 |  | 
 | 1800 | 	/* syscall = sys_call_table[syscall_nr] */ | 
 | 1801 |  | 
 | 1802 | 	movi	a4, sys_call_table; | 
 | 1803 | 	movi	a5, __NR_syscall_count | 
 | 1804 | 	movi	a6, -ENOSYS | 
 | 1805 | 	bgeu	a3, a5, 1f | 
 | 1806 |  | 
 | 1807 | 	addx4	a4, a3, a4 | 
 | 1808 | 	l32i	a4, a4, 0 | 
 | 1809 | 	movi	a5, sys_ni_syscall; | 
 | 1810 | 	beq	a4, a5, 1f | 
 | 1811 |  | 
 | 1812 | 	/* Load args: arg0 - arg5 are passed via regs. */ | 
 | 1813 |  | 
 | 1814 | 	l32i	a6, a2, PT_AREG6 | 
 | 1815 | 	l32i	a7, a2, PT_AREG3 | 
 | 1816 | 	l32i	a8, a2, PT_AREG4 | 
 | 1817 | 	l32i	a9, a2, PT_AREG5 | 
 | 1818 | 	l32i	a10, a2, PT_AREG8 | 
 | 1819 | 	l32i	a11, a2, PT_AREG9 | 
 | 1820 |  | 
 | 1821 | 	/* Pass one additional argument to the syscall: pt_regs (on stack) */ | 
 | 1822 | 	s32i	a2, a1, 0 | 
 | 1823 |  | 
 | 1824 | 	callx4	a4 | 
 | 1825 |  | 
 | 1826 | 1:	/* regs->areg[2] = return_value */ | 
 | 1827 |  | 
 | 1828 | 	s32i	a6, a2, PT_AREG2 | 
 | 1829 | 	movi	a4, do_syscall_trace_leave | 
 | 1830 | 	mov	a6, a2 | 
 | 1831 | 	callx4	a4 | 
 | 1832 | 	retw | 
 | 1833 |  | 
 | 1834 |  | 
 | 1835 | /* | 
 | 1836 |  * Create a kernel thread | 
 | 1837 |  * | 
 | 1838 |  * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | 
 | 1839 |  * a2                    a2                 a3             a4 | 
 | 1840 |  */ | 
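
/* A rough C equivalent of the sequence below (a sketch; the parent/child
 * distinction comes from the clone syscall, see the a3/a1 comparison):
 *
 *	pid = clone(flags | _CLONE_VM | _CLONE_UNTRACED, sp);
 *	if (!parent)
 *		exit(fn(arg));		// child: run fn and pass its result to exit
 *	return pid;			// parent
 */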
 | 1841 |  | 
 | 1842 | ENTRY(kernel_thread) | 
 | 1843 | 	entry	a1, 16 | 
 | 1844 |  | 
 | 1845 | 	mov	a5, a2			# preserve fn over syscall | 
 | 1846 | 	mov	a7, a3			# preserve args over syscall | 
 | 1847 |  | 
 | 1848 | 	movi	a3, _CLONE_VM | _CLONE_UNTRACED | 
 | 1849 | 	movi	a2, __NR_clone | 
 | 1850 | 	or	a6, a4, a3		# arg0: flags | 
 | 1851 | 	mov	a3, a1			# arg1: sp | 
 | 1852 | 	syscall | 
 | 1853 |  | 
 | 1854 | 	beq	a3, a1, 1f		# branch if parent | 
 | 1855 | 	mov	a6, a7			# args | 
 | 1856 | 	callx4	a5			# fn(args) | 
 | 1857 |  | 
 | 1858 | 	movi	a2, __NR_exit | 
 | 1859 | 	syscall				# return value of fn(args) still in a6 | 
 | 1860 |  | 
 | 1861 | 1:	retw | 
 | 1862 |  | 
 | 1863 | /* | 
 | 1864 |  * Do a system call from kernel space instead of calling sys_execve, so we end up | 
 | 1865 |  * with proper pt_regs. | 
 | 1866 |  * | 
 | 1867 |  * int kernel_execve(const char *fname, char *const argv[], char *const envp[]) | 
 | 1868 |  * a2                        a2               a3                  a4 | 
 | 1869 |  */ | 
 | 1870 |  | 
 | 1871 | ENTRY(kernel_execve) | 
 | 1872 | 	entry	a1, 16 | 
 | 1873 | 	mov	a6, a2			# arg0 is in a6 | 
 | 1874 | 	movi	a2, __NR_execve | 
 | 1875 | 	syscall | 
 | 1876 |  | 
 | 1877 | 	retw | 
 | 1878 |  | 
 | 1879 | /* | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1880 |  * Task switch. | 
 | 1881 |  * | 
 | 1882 |  * struct task*  _switch_to (struct task* prev, struct task* next) | 
 | 1883 |  *         a2                              a2                 a3 | 
 | 1884 |  */ | 
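
/* Outline of the switch below in rough C terms (a sketch; the real work is
 * register-window specific and has to stay in assembly):
 *
 *	prev->thread.ra = return_address;
 *	prev->thread.sp = stack_pointer;
 *	save_xtregs_user(prev_ti);			// extra/coprocessor user state
 *	prev_ti->cpenable = CPENABLE; CPENABLE = next_ti->cpenable;
 *	_spill_registers();				// flush live windows to their stacks
 *	exc_table[EXC_TABLE_KSTK] = (char *)next_ti + PT_REGS_OFFSET;
 *	return_address = next->thread.ra;
 *	stack_pointer  = next->thread.sp;
 *	load_xtregs_user(next_ti);
 *	return prev;
 */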
 | 1885 |  | 
 | 1886 | ENTRY(_switch_to) | 
 | 1887 |  | 
 | 1888 | 	entry	a1, 16 | 
 | 1889 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1890 | 	mov	a12, a2			# preserve 'prev' (a2) | 
 | 1891 | 	mov	a13, a3			# and 'next' (a3) | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1892 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1893 | 	l32i	a4, a2, TASK_THREAD_INFO | 
 | 1894 | 	l32i	a5, a3, TASK_THREAD_INFO | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1895 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1896 | 	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1897 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1898 | 	s32i	a0, a12, THREAD_RA	# save return address | 
 | 1899 | 	s32i	a1, a12, THREAD_SP	# save stack pointer | 
 | 1900 |  | 
 | 1901 | 	/* Disable ints while we manipulate the stack pointer. */ | 
 | 1902 |  | 
 | 1903 | 	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL | 
 | 1904 | 	xsr	a14, PS | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1905 | 	rsr	a3, EXCSAVE_1 | 
 | 1906 | 	rsync | 
 | 1907 | 	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */ | 
 | 1908 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1909 | 	/* Switch CPENABLE */ | 
 | 1910 |  | 
 | 1911 | #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) | 
 | 1912 | 	l32i	a3, a5, THREAD_CPENABLE | 
 | 1913 | 	xsr	a3, CPENABLE | 
 | 1914 | 	s32i	a3, a4, THREAD_CPENABLE | 
 | 1915 | #endif | 
 | 1916 |  | 
 | 1917 | 	/* Flush register file. */ | 
 | 1918 |  | 
 | 1919 | 	call0	_spill_registers	# destroys a3, a4, and SAR | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1920 |  | 
 | 1921 | 	/* Set kernel stack (and leave critical section) | 
 | 1922 | 	 * Note: It's safe to set it here. The stack will not be overwritten | 
 | 1923 | 	 *       because the kernel stack will only be loaded again after | 
 | 1924 | 	 *       we return from kernel space. | 
 | 1925 | 	 */ | 
 | 1926 |  | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1927 | 	rsr	a3, EXCSAVE_1		# exc_table | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1928 | 	movi	a6, 0 | 
 | 1929 | 	addi	a7, a5, PT_REGS_OFFSET | 
 | 1930 | 	s32i	a6, a3, EXC_TABLE_FIXUP | 
 | 1931 | 	s32i	a7, a3, EXC_TABLE_KSTK | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1932 |  | 
 | 1933 | 	/* restore context of the task that 'next' addresses */ | 
 | 1934 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1935 | 	l32i	a0, a13, THREAD_RA	# restore return address | 
 | 1936 | 	l32i	a1, a13, THREAD_SP	# restore stack pointer | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1937 |  | 
| Chris Zankel | c658eac | 2008-02-12 13:17:07 -0800 | [diff] [blame] | 1938 | 	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER | 
 | 1939 |  | 
 | 1940 | 	wsr	a14, PS | 
 | 1941 | 	mov	a2, a12			# return 'prev' | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1942 | 	rsync | 
 | 1943 |  | 
 | 1944 | 	retw | 
 | 1945 |  | 
 | 1946 |  | 
 | 1947 | ENTRY(ret_from_fork) | 
 | 1948 |  | 
 | 1949 | 	/* void schedule_tail (struct task_struct *prev) | 
 | 1950 | 	 * Note: prev is still in a6 (return value from fake call4 frame) | 
 | 1951 | 	 */ | 
 | 1952 | 	movi	a4, schedule_tail | 
 | 1953 | 	callx4	a4 | 
 | 1954 |  | 
| Chris Zankel | fc4fb2a | 2006-12-10 02:18:52 -0800 | [diff] [blame] | 1955 | 	movi	a4, do_syscall_trace_leave | 
 | 1956 | 	mov	a6, a1 | 
| Chris Zankel | 5a0015d | 2005-06-23 22:01:16 -0700 | [diff] [blame] | 1957 | 	callx4	a4 | 
 | 1958 |  | 
 | 1959 | 	j	common_exception_return | 
 | 1960 |  |