| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  * This file is subject to the terms and conditions of the GNU General Public | 
 | 3 |  * License.  See the file "COPYING" in the main directory of this archive | 
 | 4 |  * for more details. | 
 | 5 |  * | 
 | 6 |  * arch/sh64/kernel/entry.S | 
 | 7 |  * | 
 | 8 |  * Copyright (C) 2000, 2001  Paolo Alberelli | 
 | 9 |  * Copyright (C) 2004, 2005  Paul Mundt | 
 | 10 |  * Copyright (C) 2003, 2004 Richard Curnow | 
 | 11 |  * | 
 | 12 |  */ | 
 | 13 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 | #include <linux/errno.h> | 
 | 15 | #include <linux/sys.h> | 
 | 16 |  | 
 | 17 | #include <asm/processor.h> | 
 | 18 | #include <asm/registers.h> | 
 | 19 | #include <asm/unistd.h> | 
 | 20 | #include <asm/thread_info.h> | 
 | 21 | #include <asm/asm-offsets.h> | 
 | 22 |  | 
 | 23 | /* | 
 | 24 |  * SR fields. | 
 | 25 |  */ | 
 | 26 | #define SR_ASID_MASK	0x00ff0000 | 
 | 27 | #define SR_FD_MASK	0x00008000 | 
 | 28 | #define SR_SS		0x08000000 | 
 | 29 | #define SR_BL		0x10000000 | 
 | 30 | #define SR_MD		0x40000000 | 
/* NOTE(review): SR_MMU, SR_IMASK and SR_UNBLOCK_EXC are used further down but
 * are not defined in this chunk -- presumably they come from one of the
 * headers included above (<asm/registers.h> / <asm/processor.h>); confirm. */
 | 31 |  | 
 | 32 | /* | 
 | 33 |  * Event code. | 
 | 34 |  */ | 
 | 35 | #define	EVENT_INTERRUPT		0 | 
 | 36 | #define	EVENT_FAULT_TLB		1 | 
 | 37 | #define	EVENT_FAULT_NOT_TLB	2 | 
 | 38 | #define	EVENT_DEBUG		3 | 
 | 39 |  | 
 | 40 | /* EXPEVT values */ | 
 | 41 | #define	RESET_CAUSE		0x20 | 
 | 42 | #define DEBUGSS_CAUSE		0x980 | 
 | 43 |  | 
 | 44 | /* | 
 | 45 |  * Frame layout. Quad index. | 
 | 46 |  */ | 
 | 47 | #define	FRAME_T(x)	FRAME_TBASE+(x*8) | 
 | 48 | #define	FRAME_R(x)	FRAME_RBASE+(x*8) | 
 | 49 | #define	FRAME_S(x)	FRAME_SBASE+(x*8) | 
 | 50 | #define FSPC		0 | 
 | 51 | #define FSSR		1 | 
 | 52 | #define FSYSCALL_ID	2 | 
 | 53 |  | 
 | 54 | /* Arrange the save frame to be a multiple of 32 bytes long */ | 
 | 55 | #define FRAME_SBASE	0 | 
 | 56 | #define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */ | 
 | 57 | #define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */ | 
 | 58 | #define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */ | 
 | 59 | #define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */ | 
 | 60 |  | 
/* NOTE(review): FP_FRAME_SIZE references FP_FRAME_BASE before its #define,
 * which is harmless -- the C preprocessor expands macros lazily at use. */
 | 61 | #define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */ | 
 | 62 | #define FP_FRAME_BASE	0 | 
 | 63 |  | 
/* Byte offsets into reg_save_area used by the first-level handlers below. */
 | 64 | #define	SAVED_R2	0*8 | 
 | 65 | #define	SAVED_R3	1*8 | 
 | 66 | #define	SAVED_R4	2*8 | 
 | 67 | #define	SAVED_R5	3*8 | 
 | 68 | #define	SAVED_R18	4*8 | 
 | 69 | #define	SAVED_R6	5*8 | 
 | 70 | #define	SAVED_TR0	6*8 | 
 | 71 |  | 
 | 72 | /* These are the registers saved in the TLB path that aren't saved in the first | 
 | 73 |    level of the normal one. */ | 
 | 74 | #define	TLB_SAVED_R25	7*8 | 
 | 75 | #define	TLB_SAVED_TR1	8*8 | 
 | 76 | #define	TLB_SAVED_TR2	9*8 | 
 | 77 | #define	TLB_SAVED_TR3	10*8 | 
 | 78 | #define	TLB_SAVED_TR4	11*8 | 
 | 79 | /* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing | 
 | 80 |    breakage otherwise. */ | 
 | 81 | #define	TLB_SAVED_R0	12*8 | 
 | 82 | #define	TLB_SAVED_R1	13*8 | 
 | 83 |  | 
/* Mask all interrupt levels by setting SR.IMASK (bits 4-7) to 0xf.
 * Clobbers r6. */
 | 84 | #define CLI()				\ | 
 | 85 | 	getcon	SR, r6;			\ | 
 | 86 | 	ori	r6, 0xf0, r6;		\ | 
 | 87 | 	putcon	r6, SR; | 
 | 88 |  | 
/* Re-enable interrupts by clearing SR.IMASK.  Clobbers r6. */
 | 89 | #define STI()				\ | 
 | 90 | 	getcon	SR, r6;			\ | 
 | 91 | 	andi	r6, ~0xf0, r6;		\ | 
 | 92 | 	putcon	r6, SR; | 
 | 93 |  | 
/* Without CONFIG_PREEMPT the resume_kernel label does not exist below, so
 * alias it to restore_all (defined elsewhere in this file). */
 | 94 | #ifdef CONFIG_PREEMPT | 
 | 95 | #  define preempt_stop()	CLI() | 
 | 96 | #else | 
 | 97 | #  define preempt_stop() | 
 | 98 | #  define resume_kernel		restore_all | 
 | 99 | #endif | 
 | 100 |  | 
 | 101 | 	.section	.data, "aw" | 
 | 102 |  | 
 | 103 | #define FAST_TLBMISS_STACK_CACHELINES 4 | 
 | 104 | #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES) | 
 | 105 |  | 
 | 106 | /* Register back-up area for all exceptions */ | 
 | 107 | 	.balign	32 | 
 | 108 | 	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling | 
 | 109 | 	 * register saves etc. */ | 
 | 110 | 	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0 | 
 | 111 | /* This is 32 byte aligned by construction */ | 
 | 112 | /* Register back-up area for all exceptions */ | 
/* 14 quadwords, matching the offsets defined above:
 * SAVED_R2..SAVED_TR0 (0*8..6*8) plus TLB_SAVED_R25..TLB_SAVED_R1
 * (7*8..13*8).  The blank lines group them by 32-byte cache line. */
 | 113 | reg_save_area: | 
 | 114 | 	.quad	0 | 
 | 115 | 	.quad	0 | 
 | 116 | 	.quad	0 | 
 | 117 | 	.quad	0 | 
 | 118 |  | 
 | 119 | 	.quad	0 | 
 | 120 | 	.quad	0 | 
 | 121 | 	.quad	0 | 
 | 122 | 	.quad	0 | 
 | 123 |  | 
 | 124 | 	.quad	0 | 
 | 125 | 	.quad	0 | 
 | 126 | 	.quad	0 | 
 | 127 | 	.quad	0 | 
 | 128 |  | 
 | 129 | 	.quad	0 | 
 | 130 | 	.quad   0 | 
 | 131 |  | 
 | 132 | /* Save area for RESVEC exceptions. We cannot use reg_save_area because of | 
 | 133 |  * reentrancy. Note this area may be accessed via physical address. | 
 | 134 |  * Align so this fits a whole single cache line, for ease of purging. | 
 | 135 |  */ | 
/* NOTE(review): the area is 5 quads (40 bytes), which actually spans TWO
 * 32-byte cache lines; debug_interrupt below purges both (ocbp 0 and 32),
 * so the "single cache line" claim above looks stale -- confirm. */
 | 136 | 	.balign 32,0,32 | 
 | 137 | resvec_save_area: | 
 | 138 | 	.quad	0 | 
 | 139 | 	.quad	0 | 
 | 140 | 	.quad	0 | 
 | 141 | 	.quad	0 | 
 | 142 | 	.quad	0 | 
 | 143 | 	.balign 32,0,32 | 
 | 144 |  | 
 | 145 | /* Jump table of 3rd level handlers  */ | 
/* One 4-byte (.long) entry per 0x20 of EXPEVT/INTEVT.  handle_exception
 * below indexes this with (code >> 3) as a byte offset, i.e. entry number
 * = code / 0x20. */
 | 146 | trap_jtable: | 
 | 147 | 	.long	do_exception_error		/* 0x000 */ | 
 | 148 | 	.long	do_exception_error		/* 0x020 */ | 
 | 149 | 	.long	tlb_miss_load				/* 0x040 */ | 
 | 150 | 	.long	tlb_miss_store				/* 0x060 */ | 
 | 151 | 	! ARTIFICIAL pseudo-EXPEVT setting | 
 | 152 | 	.long	do_debug_interrupt		/* 0x080 */ | 
 | 153 | 	.long	tlb_miss_load				/* 0x0A0 */ | 
 | 154 | 	.long	tlb_miss_store				/* 0x0C0 */ | 
 | 155 | 	.long	do_address_error_load	/* 0x0E0 */ | 
 | 156 | 	.long	do_address_error_store	/* 0x100 */ | 
 | 157 | #ifdef CONFIG_SH_FPU | 
 | 158 | 	.long	do_fpu_error		/* 0x120 */ | 
 | 159 | #else | 
 | 160 | 	.long	do_exception_error		/* 0x120 */ | 
 | 161 | #endif | 
 | 162 | 	.long	do_exception_error		/* 0x140 */ | 
 | 163 | 	.long	system_call				/* 0x160 */ | 
 | 164 | 	.long	do_reserved_inst		/* 0x180 */ | 
 | 165 | 	.long	do_illegal_slot_inst	/* 0x1A0 */ | 
 | 166 | 	.long	do_NMI			/* 0x1C0 */ | 
 | 167 | 	.long	do_exception_error		/* 0x1E0 */ | 
 | 168 | 	.rept 15 | 
 | 169 | 		.long do_IRQ		/* 0x200 - 0x3C0 */ | 
 | 170 | 	.endr | 
 | 171 | 	.long	do_exception_error		/* 0x3E0 */ | 
 | 172 | 	.rept 32 | 
 | 173 | 		.long do_IRQ		/* 0x400 - 0x7E0 */ | 
 | 174 | 	.endr | 
 | 175 | 	.long	fpu_error_or_IRQA			/* 0x800 */ | 
 | 176 | 	.long	fpu_error_or_IRQB			/* 0x820 */ | 
 | 177 | 	.long	do_IRQ			/* 0x840 */ | 
 | 178 | 	.long	do_IRQ			/* 0x860 */ | 
 | 179 | 	.rept 6 | 
 | 180 | 		.long do_exception_error	/* 0x880 - 0x920 */ | 
 | 181 | 	.endr | 
 | 182 | 	.long	do_software_break_point	/* 0x940 */ | 
 | 183 | 	.long	do_exception_error		/* 0x960 */ | 
 | 184 | 	.long	do_single_step		/* 0x980 */ | 
 | 185 |  | 
 | 186 | 	.rept 3 | 
 | 187 | 		.long do_exception_error	/* 0x9A0 - 0x9E0 */ | 
 | 188 | 	.endr | 
 | 189 | 	.long	do_IRQ			/* 0xA00 */ | 
 | 190 | 	.long	do_IRQ			/* 0xA20 */ | 
 | 191 | 	.long	itlb_miss_or_IRQ			/* 0xA40 */ | 
 | 192 | 	.long	do_IRQ			/* 0xA60 */ | 
 | 193 | 	.long	do_IRQ			/* 0xA80 */ | 
 | 194 | 	.long	itlb_miss_or_IRQ			/* 0xAA0 */ | 
 | 195 | 	.long	do_exception_error		/* 0xAC0 */ | 
 | 196 | 	.long	do_address_error_exec	/* 0xAE0 */ | 
 | 197 | 	.rept 8 | 
 | 198 | 		.long do_exception_error	/* 0xB00 - 0xBE0 */ | 
 | 199 | 	.endr | 
 | 200 | 	.rept 18 | 
 | 201 | 		.long do_IRQ		/* 0xC00 - 0xE20 */ | 
 | 202 | 	.endr | 
 | 203 |  | 
 | 204 | 	.section	.text64, "ax" | 
 | 205 |  | 
 | 206 | /* | 
 | 207 |  * --- Exception/Interrupt/Event Handling Section | 
 | 208 |  */ | 
 | 209 |  | 
 | 210 | /* | 
 | 211 |  * VBR and RESVEC blocks. | 
 | 212 |  * | 
 | 213 |  * First level handler for VBR-based exceptions. | 
 | 214 |  * | 
 | 215 |  * To avoid waste of space, align to the maximum text block size. | 
 | 216 |  * This is assumed to be at most 128 bytes or 32 instructions. | 
 | 217 |  * DO NOT EXCEED 32 instructions on the first level handlers ! | 
 | 218 |  * | 
 | 219 |  * Also note that RESVEC is contained within the VBR block | 
 | 220 |  * where the room left (1KB - TEXT_SIZE) allows placing | 
 | 221 |  * the RESVEC block (at most 512B + TEXT_SIZE). | 
 | 222 |  * | 
 | 223 |  * So first (and only) level handler for RESVEC-based exceptions. | 
 | 224 |  * | 
 | 225 |  * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss | 
 | 226 |  * and interrupt) we are very tight with register space until | 
 | 227 |  * saving onto the stack frame, which is done in handle_exception(). | 
 | 228 |  * | 
 | 229 |  */ | 
 | 230 |  | 
 | 231 | #define	TEXT_SIZE 	128 | 
 | 232 | #define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */ | 
 | 233 |  | 
 | 234 | 	.balign TEXT_SIZE | 
 | 235 | LVBR_block: | 
 | 236 | 	.space	256, 0			/* Power-on class handler, */ | 
 | 237 | 					/* not required here       */ | 
/* VBR+0x100: general (non-TLB) exception entry.  Stashes the registers the
 * second level needs (r2-r6, r18, tr0) into reg_save_area, then enters
 * handle_exception with r2=EXPEVT, r3=return address, r4=event class,
 * r5=&reg_save_area and SP restored to the original value (from KCR1). */
 | 238 | not_a_tlb_miss: | 
 | 239 | 	synco	/* TAKum03020 (but probably a good idea anyway.) */ | 
 | 240 | 	/* Save original stack pointer into KCR1 */ | 
 | 241 | 	putcon	SP, KCR1 | 
 | 242 |  | 
 | 243 | 	/* Save other original registers into reg_save_area */ | 
 | 244 |         movi  reg_save_area, SP | 
 | 245 | 	st.q	SP, SAVED_R2, r2 | 
 | 246 | 	st.q	SP, SAVED_R3, r3 | 
 | 247 | 	st.q	SP, SAVED_R4, r4 | 
 | 248 | 	st.q	SP, SAVED_R5, r5 | 
 | 249 | 	st.q	SP, SAVED_R6, r6 | 
 | 250 | 	st.q	SP, SAVED_R18, r18 | 
 | 251 | 	gettr	tr0, r3 | 
 | 252 | 	st.q	SP, SAVED_TR0, r3 | 
 | 253 |  | 
 | 254 | 	/* Set args for Non-debug, Not a TLB miss class handler */ | 
 | 255 | 	getcon	EXPEVT, r2 | 
 | 256 | 	movi	ret_from_exception, r3 | 
 | 257 | 	ori	r3, 1, r3 | 
/* bit 0 set in a branch target selects the SHmedia ISA (cf. the
 * "force SHmedia" note later in this file). */
 | 258 | 	movi	EVENT_FAULT_NOT_TLB, r4 | 
 | 259 | 	or	SP, ZERO, r5 | 
 | 260 | 	getcon	KCR1, SP | 
 | 261 | 	pta	handle_exception, tr0 | 
 | 262 | 	blink	tr0, ZERO | 
 | 263 |  | 
 | 264 | 	.balign 256 | 
 | 265 | 	! VBR+0x200 | 
 | 266 | 	nop | 
 | 267 | 	.balign 256 | 
 | 268 | 	! VBR+0x300 | 
 | 269 | 	nop | 
 | 270 | 	.balign 256 | 
 | 271 | 	/* | 
 | 272 | 	 * Instead of the natural .balign 1024 place RESVEC here | 
 | 273 | 	 * respecting the final 1KB alignment. | 
 | 274 | 	 */ | 
 | 275 | 	.balign TEXT_SIZE | 
 | 276 | 	/* | 
 | 277 | 	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC | 
 | 278 | 	 * block making sure the final alignment is correct. | 
 | 279 | 	 */ | 
/* VBR+0x400: TLB miss entry.  Saves the first-level register set plus the
 * extra TLB_SAVED_* set, then attempts the fast in-assembler fixup via
 * do_fast_page_fault before falling back to the generic path. */
 | 280 | tlb_miss: | 
 | 281 | 	synco	/* TAKum03020 (but probably a good idea anyway.) */ | 
 | 282 | 	putcon	SP, KCR1 | 
 | 283 | 	movi	reg_save_area, SP | 
 | 284 | 	/* SP is guaranteed 32-byte aligned. */ | 
 | 285 | 	st.q	SP, TLB_SAVED_R0 , r0 | 
 | 286 | 	st.q	SP, TLB_SAVED_R1 , r1 | 
 | 287 | 	st.q	SP, SAVED_R2 , r2 | 
 | 288 | 	st.q	SP, SAVED_R3 , r3 | 
 | 289 | 	st.q	SP, SAVED_R4 , r4 | 
 | 290 | 	st.q	SP, SAVED_R5 , r5 | 
 | 291 | 	st.q	SP, SAVED_R6 , r6 | 
 | 292 | 	st.q	SP, SAVED_R18, r18 | 
 | 293 |  | 
 | 294 | 	/* Save R25 for safety; as/ld may want to use it to achieve the call to | 
 | 295 | 	 * the code in mm/tlbmiss.c */ | 
 | 296 | 	st.q	SP, TLB_SAVED_R25, r25 | 
 | 297 | 	gettr	tr0, r2 | 
 | 298 | 	gettr	tr1, r3 | 
 | 299 | 	gettr	tr2, r4 | 
 | 300 | 	gettr	tr3, r5 | 
 | 301 | 	gettr	tr4, r18 | 
 | 302 | 	st.q	SP, SAVED_TR0 , r2 | 
 | 303 | 	st.q	SP, TLB_SAVED_TR1 , r3 | 
 | 304 | 	st.q	SP, TLB_SAVED_TR2 , r4 | 
 | 305 | 	st.q	SP, TLB_SAVED_TR3 , r5 | 
 | 306 | 	st.q	SP, TLB_SAVED_TR4 , r18 | 
 | 307 |  | 
/* Call do_fast_page_fault(r2=SSR.MD, r3=EXPEVT, r4=TEA). */
 | 308 | 	pt	do_fast_page_fault, tr0 | 
 | 309 | 	getcon	SSR, r2 | 
 | 310 | 	getcon	EXPEVT, r3 | 
 | 311 | 	getcon	TEA, r4 | 
 | 312 | 	shlri	r2, 30, r2 | 
 | 313 | 	andi	r2, 1, r2	/* r2 = SSR.MD */ | 
 | 314 | 	blink 	tr0, LINK | 
 | 315 |  | 
/* NOTE(review): r2 is tested below as do_fast_page_fault's result
 * (nonzero => fault fixed); presumably the SH-5 ABI returns in r2. */
 | 316 | 	pt	fixup_to_invoke_general_handler, tr1 | 
 | 317 |  | 
 | 318 | 	/* If the fast path handler fixed the fault, just drop through quickly | 
 | 319 | 	   to the restore code right away to return to the excepting context. | 
 | 320 | 	   */ | 
 | 321 | 	beqi/u	r2, 0, tr1 | 
 | 322 |  | 
 | 323 | fast_tlb_miss_restore: | 
/* Fast path fixed the fault: undo every save made by tlb_miss above and
 * rte straight back to the faulting context.  Target registers first
 * (while r2-r5/r18 are free), then the general registers. */
 | 324 | 	ld.q	SP, SAVED_TR0, r2 | 
 | 325 | 	ld.q	SP, TLB_SAVED_TR1, r3 | 
 | 326 | 	ld.q	SP, TLB_SAVED_TR2, r4 | 
 | 327 |  | 
 | 328 | 	ld.q	SP, TLB_SAVED_TR3, r5 | 
 | 329 | 	ld.q	SP, TLB_SAVED_TR4, r18 | 
 | 330 |  | 
 | 331 | 	ptabs	r2, tr0 | 
 | 332 | 	ptabs	r3, tr1 | 
 | 333 | 	ptabs	r4, tr2 | 
 | 334 | 	ptabs	r5, tr3 | 
 | 335 | 	ptabs	r18, tr4 | 
 | 336 |  | 
 | 337 | 	ld.q	SP, TLB_SAVED_R0, r0 | 
 | 338 | 	ld.q	SP, TLB_SAVED_R1, r1 | 
 | 339 | 	ld.q	SP, SAVED_R2, r2 | 
 | 340 | 	ld.q	SP, SAVED_R3, r3 | 
 | 341 | 	ld.q	SP, SAVED_R4, r4 | 
 | 342 | 	ld.q	SP, SAVED_R5, r5 | 
 | 343 | 	ld.q	SP, SAVED_R6, r6 | 
 | 344 | 	ld.q	SP, SAVED_R18, r18 | 
 | 345 | 	ld.q	SP, TLB_SAVED_R25, r25 | 
 | 346 |  | 
 | 347 | 	getcon	KCR1, SP | 
 | 348 | 	rte | 
 | 349 | 	nop /* for safety, in case the code is run on sh5-101 cut1.x */ | 
 | 350 |  | 
 | 351 | fixup_to_invoke_general_handler: | 
/* Fast path could NOT fix the fault: restore the TLB-only saves so the
 * machine state matches what handle_exception expects (only r2-r6, r18
 * and tr0 saved in reg_save_area), then enter the generic path. */
 | 352 |  | 
 | 353 | 	/* OK, new method.  Restore stuff that's not expected to get saved into | 
 | 354 | 	   the 'first-level' reg save area, then just fall through to setting | 
 | 355 | 	   up the registers and calling the second-level handler. */ | 
 | 356 |  | 
 | 357 | 	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore | 
 | 358 | 	   r25,tr1-4 and save r6 to get into the right state.  */ | 
 | 359 |  | 
 | 360 | 	ld.q	SP, TLB_SAVED_TR1, r3 | 
 | 361 | 	ld.q	SP, TLB_SAVED_TR2, r4 | 
 | 362 | 	ld.q	SP, TLB_SAVED_TR3, r5 | 
 | 363 | 	ld.q	SP, TLB_SAVED_TR4, r18 | 
 | 364 | 	ld.q	SP, TLB_SAVED_R25, r25 | 
 | 365 |  | 
 | 366 | 	ld.q	SP, TLB_SAVED_R0, r0 | 
 | 367 | 	ld.q	SP, TLB_SAVED_R1, r1 | 
 | 368 |  | 
 | 369 | 	ptabs/u	r3, tr1 | 
 | 370 | 	ptabs/u	r4, tr2 | 
 | 371 | 	ptabs/u	r5, tr3 | 
 | 372 | 	ptabs/u	r18, tr4 | 
 | 373 |  | 
 | 374 | 	/* Set args for Non-debug, TLB miss class handler */ | 
 | 375 | 	getcon	EXPEVT, r2 | 
 | 376 | 	movi	ret_from_exception, r3 | 
 | 377 | 	ori	r3, 1, r3 | 
 | 378 | 	movi	EVENT_FAULT_TLB, r4 | 
 | 379 | 	or	SP, ZERO, r5 | 
 | 380 | 	getcon	KCR1, SP | 
 | 381 | 	pta	handle_exception, tr0 | 
 | 382 | 	blink	tr0, ZERO | 
 | 383 |  | 
 | 384 | /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE | 
 | 385 |    DOES END UP AT VBR+0x600 */ | 
/* Padding nops: the interrupt entry below must land exactly at VBR+0x600. */
 | 386 | 	nop | 
 | 387 | 	nop | 
 | 388 | 	nop | 
 | 389 | 	nop | 
 | 390 | 	nop | 
 | 391 | 	nop | 
 | 392 |  | 
 | 393 | 	.balign 256 | 
 | 394 | 	/* VBR + 0x600 */ | 
 | 395 |  | 
/* VBR+0x600: external interrupt entry.  Same shape as not_a_tlb_miss,
 * but the code passed in r2 is INTEVT and the return path is ret_from_irq. */
 | 396 | interrupt: | 
 | 397 | 	synco	/* TAKum03020 (but probably a good idea anyway.) */ | 
 | 398 | 	/* Save original stack pointer into KCR1 */ | 
 | 399 | 	putcon	SP, KCR1 | 
 | 400 |  | 
 | 401 | 	/* Save other original registers into reg_save_area */ | 
 | 402 |         movi  reg_save_area, SP | 
 | 403 | 	st.q	SP, SAVED_R2, r2 | 
 | 404 | 	st.q	SP, SAVED_R3, r3 | 
 | 405 | 	st.q	SP, SAVED_R4, r4 | 
 | 406 | 	st.q	SP, SAVED_R5, r5 | 
 | 407 | 	st.q	SP, SAVED_R6, r6 | 
 | 408 | 	st.q	SP, SAVED_R18, r18 | 
 | 409 | 	gettr	tr0, r3 | 
 | 410 | 	st.q	SP, SAVED_TR0, r3 | 
 | 411 |  | 
 | 412 | 	/* Set args for interrupt class handler */ | 
 | 413 | 	getcon	INTEVT, r2 | 
 | 414 | 	movi	ret_from_irq, r3 | 
 | 415 | 	ori	r3, 1, r3 | 
 | 416 | 	movi	EVENT_INTERRUPT, r4 | 
 | 417 | 	or	SP, ZERO, r5 | 
 | 418 | 	getcon	KCR1, SP | 
 | 419 | 	pta	handle_exception, tr0 | 
 | 420 | 	blink	tr0, ZERO | 
 | 421 | 	.balign	TEXT_SIZE		/* let's waste the bare minimum */ | 
 | 422 |  | 
 | 423 | LVBR_block_end:				/* Marker. Used for total checking */ | 
 | 424 |  | 
 | 425 | 	.balign 256 | 
 | 426 | LRESVEC_block: | 
 | 427 | 	/* Panic handler. Called with MMU off. Possible causes/actions: | 
 | 428 | 	 * - Reset:		Jump to program start. | 
 | 429 | 	 * - Single Step:	Turn off Single Step & return. | 
 | 430 | 	 * - Others:		Call panic handler, passing PC as arg. | 
 | 431 | 	 *			(this may need to be extended...) | 
 | 432 | 	 */ | 
 | 433 | reset_or_panic: | 
 | 434 | 	synco	/* TAKum03020 (but probably a good idea anyway.) */ | 
 | 435 | 	putcon	SP, DCR | 
/* Original SP is parked in DCR.  resvec_save_area is addressed via its
 * physical address (minus CONFIG_CACHED_MEMORY_OFFSET) because the MMU
 * is off here. */
 | 436 | 	/* First save r0-1 and tr0, as we need to use these */ | 
 | 437 | 	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP | 
 | 438 | 	st.q	SP, 0, r0 | 
 | 439 | 	st.q	SP, 8, r1 | 
 | 440 | 	gettr	tr0, r0 | 
 | 441 | 	st.q	SP, 32, r0 | 
 | 442 |  | 
 | 443 | 	/* Check cause */ | 
 | 444 | 	getcon	EXPEVT, r0 | 
 | 445 | 	movi	RESET_CAUSE, r1 | 
 | 446 | 	sub	r1, r0, r1		/* r1=0 if reset */ | 
 | 447 | 	movi	_stext-CONFIG_CACHED_MEMORY_OFFSET, r0 | 
 | 448 | 	ori	r0, 1, r0 | 
 | 449 | 	ptabs	r0, tr0 | 
 | 450 | 	beqi	r1, 0, tr0		/* Jump to start address if reset */ | 
 | 451 |  | 
 | 452 | 	getcon	EXPEVT, r0 | 
 | 453 | 	movi	DEBUGSS_CAUSE, r1 | 
 | 454 | 	sub	r1, r0, r1		/* r1=0 if single step */ | 
 | 455 | 	pta	single_step_panic, tr0 | 
 | 456 | 	beqi	r1, 0, tr0		/* jump if single step */ | 
 | 457 |  | 
 | 458 | 	/* Now jump to where we save the registers. */ | 
 | 459 | 	movi	panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1 | 
 | 460 | 	ptabs	r1, tr0 | 
 | 461 | 	blink	tr0, r63 | 
 | 462 |  | 
 | 463 | single_step_panic: | 
 | 464 | 	/* We are in a handler with Single Step set. We need to resume the | 
 | 465 | 	 * handler, by turning on MMU & turning off Single Step. */ | 
 | 466 | 	getcon	SSR, r0 | 
 | 467 | 	movi	SR_MMU, r1 | 
 | 468 | 	or	r0, r1, r0 | 
 | 469 | 	movi	~SR_SS, r1 | 
 | 470 | 	and	r0, r1, r0 | 
 | 471 | 	putcon	r0, SSR | 
 | 472 | 	/* Restore EXPEVT, as the rte won't do this */ | 
 | 473 | 	getcon	PEXPEVT, r0 | 
 | 474 | 	putcon	r0, EXPEVT | 
 | 475 | 	/* Restore regs */ | 
/* tr0 was saved at offset 32 by reset_or_panic; restore it first while
 * r0 is still free. */
 | 476 | 	ld.q	SP, 32, r0 | 
 | 477 | 	ptabs	r0, tr0 | 
 | 478 | 	ld.q	SP, 0, r0 | 
 | 479 | 	ld.q	SP, 8, r1 | 
 | 480 | 	getcon	DCR, SP | 
 | 481 | 	synco | 
 | 482 | 	rte | 
 | 484 |  | 
 | 485 | 	.balign	256 | 
 | 486 | debug_exception: | 
 | 487 | 	synco	/* TAKum03020 (but probably a good idea anyway.) */ | 
 | 488 | 	/* | 
 | 489 | 	 * Single step/software_break_point first level handler. | 
 | 490 | 	 * Called with MMU off, so the first thing we do is enable it | 
 | 491 | 	 * by doing an rte with appropriate SSR. | 
 | 492 | 	 */ | 
 | 493 | 	putcon	SP, DCR | 
 | 494 | 	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */ | 
 | 495 | 	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP | 
 | 496 |  | 
 | 497 | 	/* With the MMU off, we are bypassing the cache, so purge any | 
 | 498 |          * data that will be made stale by the following stores. | 
 | 499 |          */ | 
 | 500 | 	ocbp	SP, 0 | 
 | 501 | 	synco | 
 | 502 |  | 
 | 503 | 	st.q	SP, 0, r0 | 
 | 504 | 	st.q	SP, 8, r1 | 
 | 505 | 	getcon	SPC, r0 | 
 | 506 | 	st.q	SP, 16, r0 | 
 | 507 | 	getcon	SSR, r0 | 
 | 508 | 	st.q	SP, 24, r0 | 
 | 509 |  | 
 | 510 | 	/* Enable MMU, block exceptions, set priv mode, disable single step */ | 
 | 511 | 	movi	SR_MMU | SR_BL | SR_MD, r1 | 
 | 512 | 	or	r0, r1, r0 | 
 | 513 | 	movi	~SR_SS, r1 | 
 | 514 | 	and	r0, r1, r0 | 
 | 515 | 	putcon	r0, SSR | 
 | 516 | 	/* Force control to debug_exception_2 when rte is executed */ | 
/* NOTE(review): the label "debug_exeception_2" is misspelled but
 * consistently so (this reference and the definition below agree);
 * any rename must touch both sites. */
 | 517 | 	movi	debug_exeception_2, r0 | 
 | 518 | 	ori	r0, 1, r0      /* force SHmedia, just in case */ | 
 | 519 | 	putcon	r0, SPC | 
 | 520 | 	getcon	DCR, SP | 
 | 521 | 	synco | 
 | 522 | 	rte | 
 | 523 | debug_exeception_2: | 
 | 524 | 	/* Restore saved regs */ | 
/* MMU is back on now; resvec_save_area is addressed virtually again. */
 | 525 | 	putcon	SP, KCR1 | 
 | 526 | 	movi	resvec_save_area, SP | 
 | 527 | 	ld.q	SP, 24, r0 | 
 | 528 | 	putcon	r0, SSR | 
 | 529 | 	ld.q	SP, 16, r0 | 
 | 530 | 	putcon	r0, SPC | 
 | 531 | 	ld.q	SP, 0, r0 | 
 | 532 | 	ld.q	SP, 8, r1 | 
 | 533 |  | 
 | 534 | 	/* Save other original registers into reg_save_area */ | 
 | 535 |         movi  reg_save_area, SP | 
 | 536 | 	st.q	SP, SAVED_R2, r2 | 
 | 537 | 	st.q	SP, SAVED_R3, r3 | 
 | 538 | 	st.q	SP, SAVED_R4, r4 | 
 | 539 | 	st.q	SP, SAVED_R5, r5 | 
 | 540 | 	st.q	SP, SAVED_R6, r6 | 
 | 541 | 	st.q	SP, SAVED_R18, r18 | 
 | 542 | 	gettr	tr0, r3 | 
 | 543 | 	st.q	SP, SAVED_TR0, r3 | 
 | 544 |  | 
 | 545 | 	/* Set args for debug class handler */ | 
 | 546 | 	getcon	EXPEVT, r2 | 
 | 547 | 	movi	ret_from_exception, r3 | 
 | 548 | 	ori	r3, 1, r3 | 
 | 549 | 	movi	EVENT_DEBUG, r4 | 
 | 550 | 	or	SP, ZERO, r5 | 
 | 551 | 	getcon	KCR1, SP | 
 | 552 | 	pta	handle_exception, tr0 | 
 | 553 | 	blink	tr0, ZERO | 
 | 554 |  | 
 | 555 | 	.balign	256 | 
 | 556 | debug_interrupt: | 
 | 557 | 	/* !!! WE COME HERE IN REAL MODE !!! */ | 
 | 558 | 	/* Hook-up debug interrupt to allow various debugging options to be | 
 | 559 | 	 * hooked into its handler. */ | 
 | 560 | 	/* Save original stack pointer into KCR1 */ | 
 | 561 | 	synco | 
 | 562 | 	putcon	SP, KCR1 | 
 | 563 | 	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP | 
/* Purge the two cache lines (offsets 0 and 32) covering the save area
 * before writing it through its physical (uncached) address. */
 | 564 | 	ocbp	SP, 0 | 
 | 565 | 	ocbp	SP, 32 | 
 | 566 | 	synco | 
 | 567 |  | 
 | 568 | 	/* Save other original registers into reg_save_area thru real addresses */ | 
 | 569 | 	st.q	SP, SAVED_R2, r2 | 
 | 570 | 	st.q	SP, SAVED_R3, r3 | 
 | 571 | 	st.q	SP, SAVED_R4, r4 | 
 | 572 | 	st.q	SP, SAVED_R5, r5 | 
 | 573 | 	st.q	SP, SAVED_R6, r6 | 
 | 574 | 	st.q	SP, SAVED_R18, r18 | 
 | 575 | 	gettr	tr0, r3 | 
 | 576 | 	st.q	SP, SAVED_TR0, r3 | 
 | 577 |  | 
 | 578 | 	/* move (spc,ssr)->(pspc,pssr).  The rte will shift | 
 | 579 | 	   them back again, so that they look like the originals | 
 | 580 | 	   as far as the real handler code is concerned. */ | 
 | 581 | 	getcon	spc, r6 | 
 | 582 | 	putcon	r6, pspc | 
 | 583 | 	getcon	ssr, r6 | 
 | 584 | 	putcon	r6, pssr | 
 | 585 |  | 
 | 586 | 	! construct useful SR for handle_exception | 
 | 587 | 	movi	3, r6 | 
 | 588 | 	shlli	r6, 30, r6 | 
 | 589 | 	getcon	sr, r18 | 
 | 590 | 	or	r18, r6, r6 | 
 | 591 | 	putcon	r6, ssr | 
 | 592 |  | 
 | 593 | 	! SSR is now the current SR with the MD and MMU bits set | 
 | 594 | 	! i.e. the rte will switch back to priv mode and put | 
 | 595 | 	! the mmu back on | 
 | 596 |  | 
 | 597 | 	! construct spc | 
 | 598 | 	movi	handle_exception, r18 | 
 | 599 | 	ori	r18, 1, r18		! for safety (do we need this?) | 
 | 600 | 	putcon	r18, spc | 
 | 601 |  | 
 | 602 | 	/* Set args for Non-debug, Not a TLB miss class handler */ | 
 | 603 |  | 
 | 604 | 	! EXPEVT==0x80 is unused, so 'steal' this value to put the | 
 | 605 | 	! debug interrupt handler in the vectoring table | 
 | 606 | 	movi	0x80, r2 | 
 | 607 | 	movi	ret_from_exception, r3 | 
 | 608 | 	ori	r3, 1, r3 | 
 | 609 | 	movi	EVENT_FAULT_NOT_TLB, r4 | 
 | 610 |  | 
/* r5 must be the VIRTUAL address of the save area for handle_exception,
 * so add the cached-memory offset back onto the physical SP. */
 | 611 | 	or	SP, ZERO, r5 | 
 | 612 | 	movi	CONFIG_CACHED_MEMORY_OFFSET, r6 | 
 | 613 | 	add	r6, r5, r5 | 
 | 614 | 	getcon	KCR1, SP | 
 | 615 |  | 
 | 616 | 	synco	! for safety | 
 | 617 | 	rte	! -> handle_exception, switch back to priv mode again | 
 | 618 |  | 
 | 619 | LRESVEC_block_end:			/* Marker. Unused. */ | 
 | 620 |  | 
 | 621 | 	.balign	TEXT_SIZE | 
 | 622 |  | 
 | 623 | /* | 
 | 624 |  * Second level handler for VBR-based exceptions. Pre-handler. | 
 | 625 |  * In common to all stack-frame sensitive handlers. | 
 | 626 |  * | 
 | 627 |  * Inputs: | 
 | 628 |  * (KCR0) Current [current task union] | 
 | 629 |  * (KCR1) Original SP | 
 | 630 |  * (r2)   INTEVT/EXPEVT | 
 | 631 |  * (r3)   appropriate return address | 
 | 632 |  * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug) | 
 | 633 |  * (r5)   Pointer to reg_save_area | 
 | 634 |  * (SP)   Original SP | 
 | 635 |  * | 
 | 636 |  * Available registers: | 
 | 637 |  * (r6) | 
 | 638 |  * (r18) | 
 | 639 |  * (tr0) | 
 | 640 |  * | 
 | 641 |  */ | 
 | 642 | handle_exception: | 
 | 643 | 	/* Common 2nd level handler. */ | 
 | 644 |  | 
 | 645 | 	/* First thing we need an appropriate stack pointer */ | 
/* SSR bit 30 is the interrupted context's MD (privilege) bit: if set we
 * faulted in kernel mode and the original SP is usable as-is. */
 | 646 | 	getcon	SSR, r6 | 
 | 647 | 	shlri	r6, 30, r6 | 
 | 648 | 	andi	r6, 1, r6 | 
 | 649 | 	pta	stack_ok, tr0 | 
 | 650 | 	bne	r6, ZERO, tr0		/* Original stack pointer is fine */ | 
 | 651 |  | 
 | 652 | 	/* Set stack pointer for user fault */ | 
 | 653 | 	getcon	KCR0, SP | 
 | 654 | 	movi	THREAD_SIZE, r6		/* Point to the end */ | 
 | 655 | 	add	SP, r6, SP | 
 | 656 |  | 
 | 657 | stack_ok: | 
 | 658 |  | 
 | 659 | /* DEBUG : check for underflow/overflow of the kernel stack */ | 
 | 660 | 	pta	no_underflow, tr0 | 
 | 661 | 	getcon  KCR0, r6 | 
 | 662 | 	movi	1024, r18 | 
 | 663 | 	add	r6, r18, r6 | 
 | 664 | 	bge	SP, r6, tr0 	! ? below 1k from bottom of stack : danger zone | 
 | 665 |  | 
 | 666 | /* Just panic to cause a crash. */ | 
 | 667 | bad_sp: | 
/* r63 reads as zero (it is used interchangeably with ZERO elsewhere in
 * this file), so this is a deliberate load from address 0 to fault. */
 | 668 | 	ld.b	r63, 0, r6 | 
 | 669 | 	nop | 
 | 670 |  | 
 | 671 | no_underflow: | 
 | 672 | 	pta	bad_sp, tr0 | 
 | 673 | 	getcon	kcr0, r6 | 
 | 674 | 	movi	THREAD_SIZE, r18 | 
 | 675 | 	add	r18, r6, r6 | 
 | 676 | 	bgt	SP, r6, tr0	! sp above the stack | 
 | 677 |  | 
 | 678 | 	/* Make some room for the BASIC frame. */ | 
 | 679 | 	movi	-(FRAME_SIZE), r6 | 
 | 680 | 	add	SP, r6, SP | 
 | 681 |  | 
 | 682 | /* Could do this with no stalling if we had another spare register, but the | 
 | 683 |    code below will be OK. */ | 
/* Copy the first-level saves (r2-r6, r18, tr0) from reg_save_area (r5)
 * into the new pt_regs-style frame; loads and stores are interleaved to
 * hide load-use latency. */
 | 684 | 	ld.q	r5, SAVED_R2, r6 | 
 | 685 | 	ld.q	r5, SAVED_R3, r18 | 
 | 686 | 	st.q	SP, FRAME_R(2), r6 | 
 | 687 | 	ld.q	r5, SAVED_R4, r6 | 
 | 688 | 	st.q	SP, FRAME_R(3), r18 | 
 | 689 | 	ld.q	r5, SAVED_R5, r18 | 
 | 690 | 	st.q	SP, FRAME_R(4), r6 | 
 | 691 | 	ld.q	r5, SAVED_R6, r6 | 
 | 692 | 	st.q	SP, FRAME_R(5), r18 | 
 | 693 | 	ld.q	r5, SAVED_R18, r18 | 
 | 694 | 	st.q	SP, FRAME_R(6), r6 | 
 | 695 | 	ld.q	r5, SAVED_TR0, r6 | 
 | 696 | 	st.q	SP, FRAME_R(18), r18 | 
 | 697 | 	st.q	SP, FRAME_T(0), r6 | 
 | 698 |  | 
 | 699 | 	/* Keep old SP around */ | 
 | 700 | 	getcon	KCR1, r6 | 
 | 701 |  | 
 | 702 | 	/* Save the rest of the general purpose registers */ | 
 | 703 | 	st.q	SP, FRAME_R(0), r0 | 
 | 704 | 	st.q	SP, FRAME_R(1), r1 | 
 | 705 | 	st.q	SP, FRAME_R(7), r7 | 
 | 706 | 	st.q	SP, FRAME_R(8), r8 | 
 | 707 | 	st.q	SP, FRAME_R(9), r9 | 
 | 708 | 	st.q	SP, FRAME_R(10), r10 | 
 | 709 | 	st.q	SP, FRAME_R(11), r11 | 
 | 710 | 	st.q	SP, FRAME_R(12), r12 | 
 | 711 | 	st.q	SP, FRAME_R(13), r13 | 
 | 712 | 	st.q	SP, FRAME_R(14), r14 | 
 | 713 |  | 
 | 714 | 	/* SP is somewhere else */ | 
 | 715 | 	st.q	SP, FRAME_R(15), r6 | 
 | 716 |  | 
 | 717 | 	st.q	SP, FRAME_R(16), r16 | 
 | 718 | 	st.q	SP, FRAME_R(17), r17 | 
 | 719 | 	/* r18 is saved earlier. */ | 
 | 720 | 	st.q	SP, FRAME_R(19), r19 | 
 | 721 | 	st.q	SP, FRAME_R(20), r20 | 
 | 722 | 	st.q	SP, FRAME_R(21), r21 | 
 | 723 | 	st.q	SP, FRAME_R(22), r22 | 
 | 724 | 	st.q	SP, FRAME_R(23), r23 | 
 | 725 | 	st.q	SP, FRAME_R(24), r24 | 
 | 726 | 	st.q	SP, FRAME_R(25), r25 | 
 | 727 | 	st.q	SP, FRAME_R(26), r26 | 
 | 728 | 	st.q	SP, FRAME_R(27), r27 | 
 | 729 | 	st.q	SP, FRAME_R(28), r28 | 
 | 730 | 	st.q	SP, FRAME_R(29), r29 | 
 | 731 | 	st.q	SP, FRAME_R(30), r30 | 
 | 732 | 	st.q	SP, FRAME_R(31), r31 | 
 | 733 | 	st.q	SP, FRAME_R(32), r32 | 
 | 734 | 	st.q	SP, FRAME_R(33), r33 | 
 | 735 | 	st.q	SP, FRAME_R(34), r34 | 
 | 736 | 	st.q	SP, FRAME_R(35), r35 | 
 | 737 | 	st.q	SP, FRAME_R(36), r36 | 
 | 738 | 	st.q	SP, FRAME_R(37), r37 | 
 | 739 | 	st.q	SP, FRAME_R(38), r38 | 
 | 740 | 	st.q	SP, FRAME_R(39), r39 | 
 | 741 | 	st.q	SP, FRAME_R(40), r40 | 
 | 742 | 	st.q	SP, FRAME_R(41), r41 | 
 | 743 | 	st.q	SP, FRAME_R(42), r42 | 
 | 744 | 	st.q	SP, FRAME_R(43), r43 | 
 | 745 | 	st.q	SP, FRAME_R(44), r44 | 
 | 746 | 	st.q	SP, FRAME_R(45), r45 | 
 | 747 | 	st.q	SP, FRAME_R(46), r46 | 
 | 748 | 	st.q	SP, FRAME_R(47), r47 | 
 | 749 | 	st.q	SP, FRAME_R(48), r48 | 
 | 750 | 	st.q	SP, FRAME_R(49), r49 | 
 | 751 | 	st.q	SP, FRAME_R(50), r50 | 
 | 752 | 	st.q	SP, FRAME_R(51), r51 | 
 | 753 | 	st.q	SP, FRAME_R(52), r52 | 
 | 754 | 	st.q	SP, FRAME_R(53), r53 | 
 | 755 | 	st.q	SP, FRAME_R(54), r54 | 
 | 756 | 	st.q	SP, FRAME_R(55), r55 | 
 | 757 | 	st.q	SP, FRAME_R(56), r56 | 
 | 758 | 	st.q	SP, FRAME_R(57), r57 | 
 | 759 | 	st.q	SP, FRAME_R(58), r58 | 
 | 760 | 	st.q	SP, FRAME_R(59), r59 | 
 | 761 | 	st.q	SP, FRAME_R(60), r60 | 
 | 762 | 	st.q	SP, FRAME_R(61), r61 | 
 | 763 | 	st.q	SP, FRAME_R(62), r62 | 
 | 764 |  | 
 | 765 | 	/* | 
 | 766 | 	 * Save the S* registers. | 
 | 767 | 	 */ | 
 | 768 | 	getcon	SSR, r61 | 
 | 769 | 	st.q	SP, FRAME_S(FSSR), r61 | 
 | 770 | 	getcon	SPC, r62 | 
 | 771 | 	st.q	SP, FRAME_S(FSPC), r62 | 
 | 772 | 	movi	-1, r62			/* Reset syscall_nr */ | 
 | 773 | 	st.q	SP, FRAME_S(FSYSCALL_ID), r62 | 
 | 774 |  | 
 | 775 | 	/* Save the rest of the target registers */ | 
 | 776 | 	gettr	tr1, r6 | 
 | 777 | 	st.q	SP, FRAME_T(1), r6 | 
 | 778 | 	gettr	tr2, r6 | 
 | 779 | 	st.q	SP, FRAME_T(2), r6 | 
 | 780 | 	gettr	tr3, r6 | 
 | 781 | 	st.q	SP, FRAME_T(3), r6 | 
 | 782 | 	gettr	tr4, r6 | 
 | 783 | 	st.q	SP, FRAME_T(4), r6 | 
 | 784 | 	gettr	tr5, r6 | 
 | 785 | 	st.q	SP, FRAME_T(5), r6 | 
 | 786 | 	gettr	tr6, r6 | 
 | 787 | 	st.q	SP, FRAME_T(6), r6 | 
 | 788 | 	gettr	tr7, r6 | 
 | 789 | 	st.q	SP, FRAME_T(7), r6 | 
 | 790 |  | 
 | 791 | 	! setup FP so that unwinder can wind back through nested kernel mode | 
 | 792 | 	! exceptions | 
 | 793 | 	add	SP, ZERO, r14 | 
 | 794 |  | 
 | 795 | #ifdef CONFIG_POOR_MANS_STRACE | 
 | 796 | 	/* We've pushed all the registers now, so only r2-r4 hold anything | 
 | 797 | 	 * useful. Move them into callee save registers */ | 
 | 798 | 	or	r2, ZERO, r28 | 
 | 799 | 	or	r3, ZERO, r29 | 
 | 800 | 	or	r4, ZERO, r30 | 
 | 801 |  | 
 | 802 | 	/* Preserve r2 as the event code */ | 
 | 803 | 	movi	evt_debug, r3 | 
 | 804 | 	ori	r3, 1, r3 | 
 | 805 | 	ptabs	r3, tr0 | 
 | 806 |  | 
 | 807 | 	or	SP, ZERO, r6 | 
 | 808 | 	getcon	TRA, r5 | 
 | 809 | 	blink	tr0, LINK | 
 | 810 |  | 
 | 811 | 	or	r28, ZERO, r2 | 
 | 812 | 	or	r29, ZERO, r3 | 
 | 813 | 	or	r30, ZERO, r4 | 
 | 814 | #endif | 
 | 815 |  | 
 | 816 | 	/* For syscall and debug race condition, get TRA now */ | 
 | 817 | 	getcon	TRA, r5 | 
 | 818 |  | 
 | 819 | 	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf | 
 | 820 | 	 * Also set FD, to catch FPU usage in the kernel. | 
 | 821 | 	 * | 
 | 822 | 	 * benedict.gaster@superh.com 29/07/2002 | 
 | 823 | 	 * | 
 | 824 | 	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the | 
 | 825 | 	 * same time change BL from 1->0, as any pending interrupt of a level | 
 | 826 | 	 * higher than the previous value of IMASK will leak through and be | 
 | 827 | 	 * taken unexpectedly. | 
 | 828 | 	 * | 
 | 829 | 	 * To avoid this we raise the IMASK and then issue another PUTCON to | 
 | 830 | 	 * enable interrupts. | 
 | 831 |          */ | 
 | 832 | 	getcon	SR, r6 | 
 | 833 | 	movi	SR_IMASK | SR_FD, r7 | 
 | 834 | 	or	r6, r7, r6 | 
 | 835 | 	putcon	r6, SR | 
 | 836 | 	movi	SR_UNBLOCK_EXC, r7 | 
 | 837 | 	and	r6, r7, r6 | 
 | 838 | 	putcon	r6, SR | 
 | 839 |  | 
 | 840 |  | 
 | 841 | 	/* Now call the appropriate 3rd level handler */ | 
/* r2 >> 3 gives the byte offset into trap_jtable (4-byte entries, codes
 * step by 0x20); the second shift leaves r2 = code/32 for the handler.
 * LINK carries the return path chosen by the first-level handler; r3 is
 * clobbered by the table load, then reused to pass the pt_regs pointer. */
 | 842 | 	or	r3, ZERO, LINK | 
 | 843 | 	movi	trap_jtable, r3 | 
 | 844 | 	shlri	r2, 3, r2 | 
 | 845 | 	ldx.l	r2, r3, r3 | 
 | 846 | 	shlri	r2, 2, r2 | 
 | 847 | 	ptabs	r3, tr0 | 
 | 848 | 	or	SP, ZERO, r3 | 
 | 849 | 	blink	tr0, ZERO | 
 | 850 |  | 
 | 851 | /* | 
 | 852 |  * Second level handler for VBR-based exceptions. Post-handlers. | 
 | 853 |  * | 
 | 854 |  * Post-handlers for interrupts (ret_from_irq), exceptions | 
 | 855 |  * (ret_from_exception) and common reentrance doors (restore_all | 
 | 856 |  * to get back to the original context, ret_from_syscall loop to | 
 | 857 |  * check kernel exiting). | 
 | 858 |  * | 
 | 859 |  * ret_with_reschedule and work_notifysig are inner labels of | 
 | 860 |  * the ret_from_syscall loop. | 
 | 861 |  * | 
 | 862 |  * In common to all stack-frame sensitive handlers. | 
 | 863 |  * | 
 | 864 |  * Inputs: | 
 | 865 |  * (SP)   struct pt_regs *, original register's frame pointer (basic) | 
 | 866 |  * | 
 | 867 |  */ | 
 | 868 | 	.global ret_from_irq | 
 | 869 | ret_from_irq: | 
/* If the interrupted context was kernel mode (SSR bit 30 == MD set),
 * resume it directly; otherwise re-enable interrupts and run the
 * user-return reschedule checks. */
 | 870 | #ifdef CONFIG_POOR_MANS_STRACE | 
 | 871 | 	pta	evt_debug_ret_from_irq, tr0 | 
 | 872 | 	ori	SP, 0, r2 | 
 | 873 | 	blink	tr0, LINK | 
 | 874 | #endif | 
 | 875 | 	ld.q	SP, FRAME_S(FSSR), r6 | 
 | 876 | 	shlri	r6, 30, r6 | 
 | 877 | 	andi	r6, 1, r6 | 
 | 878 | 	pta	resume_kernel, tr0 | 
 | 879 | 	bne	r6, ZERO, tr0		/* no further checks */ | 
 | 880 | 	STI() | 
 | 881 | 	pta	ret_with_reschedule, tr0 | 
 | 882 | 	blink	tr0, ZERO		/* Do not check softirqs */ | 
 | 883 |  | 
/*
 * ret_from_exception: second-level post-handler run when returning from
 * a non-interrupt exception.  Like ret_from_irq, it tests the saved
 * SSR.MD bit: kernel-mode faults go to resume_kernel, user-mode faults
 * fall through to the user-return work loop below.
 */
	.global ret_from_exception
ret_from_exception:
	preempt_stop()		/* assumed to disable interrupts under CONFIG_PREEMPT — TODO confirm macro */

#ifdef CONFIG_POOR_MANS_STRACE
	pta	evt_debug_ret_from_exc, tr0
	ori	SP, 0, r2		/* r2 = struct pt_regs * for the debug hook */
	blink	tr0, LINK
#endif

	ld.q	SP, FRAME_S(FSSR), r6	/* r6 = saved SSR */
	shlri	r6, 30, r6
	andi	r6, 1, r6		/* r6 = SSR.MD (1 = taken in kernel mode) */
	pta	resume_kernel, tr0
	bne	r6, ZERO, tr0		/* no further checks */

	/* Check softirqs */

#ifdef CONFIG_PREEMPT
	pta   ret_from_syscall, tr0
	blink   tr0, ZERO

/*
 * resume_kernel: decide whether a return to kernel mode may preempt.
 * tr0 = restore_all (the no-preempt exit) throughout.
 */
resume_kernel:
	pta	restore_all, tr0

	getcon	KCR0, r6		/* r6 = current_thread_info (kept in KCR0) */
	ld.l	r6, TI_PRE_COUNT, r7
	/* NOTE(review): branches to restore_all when preempt_count == 0;
	   usual preempt logic bails out when it is NON-zero — verify polarity. */
	beq/u	r7, ZERO, tr0

need_resched:
	ld.l	r6, TI_FLAGS, r7
	movi	(1 << TIF_NEED_RESCHED), r8
	and	r8, r7, r8
	/* NOTE(review): branches to restore_all when TIF_NEED_RESCHED IS set —
	   polarity also looks inverted, verify against a known-good preempt path. */
	bne	r8, ZERO, tr0

	getcon	SR, r7
	andi	r7, 0xf0, r7		/* isolate SR.IMASK field */
	bne	r7, ZERO, tr0		/* interrupts masked: do not preempt */

	movi	((PREEMPT_ACTIVE >> 16) & 65535), r8
	shori	(PREEMPT_ACTIVE & 65535), r8
	st.l	r6, TI_PRE_COUNT, r8	/* preempt_count = PREEMPT_ACTIVE */

	STI()
	movi	schedule, r7
	ori	r7, 1, r7		/* set bit 0: SHmedia target for ptabs */
	ptabs	r7, tr1
	blink	tr1, LINK		/* call schedule() */

	st.l	r6, TI_PRE_COUNT, ZERO	/* preempt_count = 0 */
	CLI()

	pta	need_resched, tr1
	blink	tr1, ZERO		/* re-check NEED_RESCHED */
#endif
 | 939 |  | 
/*
 * ret_from_syscall / ret_with_reschedule: common user-return work loop.
 * Checks thread_info->flags: TIF_NEED_RESCHED -> work_resched,
 * TIF_SIGPENDING -> work_notifysig, otherwise restore_all.
 */
	.global ret_from_syscall
ret_from_syscall:

ret_with_reschedule:
	getcon	KCR0, r6		! r6 contains current_thread_info
	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags

	! FIXME:!!!
	! no handling of TIF_SYSCALL_TRACE yet!!

	movi	(1 << TIF_NEED_RESCHED), r8
	and	r8, r7, r8
	pta	work_resched, tr0
	bne	r8, ZERO, tr0

	pta	restore_all, tr1	! tr1 is also consumed by work_notifysig

	movi	(1 << TIF_SIGPENDING), r8
	and	r8, r7, r8
	pta	work_notifysig, tr0
	bne	r8, ZERO, tr0

	blink	tr1, ZERO		! no work pending: restore_all
 | 963 |  | 
/*
 * work_resched: call schedule(); it returns to ret_from_syscall so the
 * work flags are re-checked after the reschedule.
 */
work_resched:
	pta	ret_from_syscall, tr0
	gettr	tr0, LINK		/* schedule() will return to ret_from_syscall */
	movi	schedule, r6
	ptabs	r6, tr0
	blink	tr0, ZERO		/* Call schedule(), return on top */
 | 970 |  | 
/*
 * work_notifysig: deliver pending signals via do_signal(regs, 0).
 * tr1 holds restore_all (loaded by ret_with_reschedule); after do_signal
 * returns here we fall through into restore_all.
 */
work_notifysig:
	gettr	tr1, LINK		/* LINK = restore_all (overwritten by blink below) */

	movi	do_signal, r6
	ptabs	r6, tr0
	or	SP, ZERO, r2		/* arg0: struct pt_regs * */
	or	ZERO, ZERO, r3		/* arg1: 0 */
	blink	tr0, LINK	    /* Call do_signal(regs, 0), return here */
 | 979 |  | 
/*
 * restore_all: common exit path.  Reload the full machine context from
 * the save frame (SP = struct pt_regs *) and rte back to the original
 * context.  Order matters: target registers first, then general
 * registers, then (with exceptions blocked) SSR/SPC, the last few
 * general registers, and finally SP itself.
 */
restore_all:
	/* Do prefetches */

	ld.q	SP, FRAME_T(0), r6
	ld.q	SP, FRAME_T(1), r7
	ld.q	SP, FRAME_T(2), r8
	ld.q	SP, FRAME_T(3), r9
	ptabs	r6, tr0
	ptabs	r7, tr1
	ptabs	r8, tr2
	ptabs	r9, tr3
	ld.q	SP, FRAME_T(4), r6
	ld.q	SP, FRAME_T(5), r7
	ld.q	SP, FRAME_T(6), r8
	ld.q	SP, FRAME_T(7), r9
	ptabs	r6, tr4
	ptabs	r7, tr5
	ptabs	r8, tr6
	ptabs	r9, tr7

	/* General registers; r15 (SP) is deliberately restored last. */
	ld.q	SP, FRAME_R(0), r0
	ld.q	SP, FRAME_R(1), r1
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7
	ld.q	SP, FRAME_R(8), r8
	ld.q	SP, FRAME_R(9), r9
	ld.q	SP, FRAME_R(10), r10
	ld.q	SP, FRAME_R(11), r11
	ld.q	SP, FRAME_R(12), r12
	ld.q	SP, FRAME_R(13), r13
	ld.q	SP, FRAME_R(14), r14

	ld.q	SP, FRAME_R(16), r16
	ld.q	SP, FRAME_R(17), r17
	ld.q	SP, FRAME_R(18), r18
	ld.q	SP, FRAME_R(19), r19
	ld.q	SP, FRAME_R(20), r20
	ld.q	SP, FRAME_R(21), r21
	ld.q	SP, FRAME_R(22), r22
	ld.q	SP, FRAME_R(23), r23
	ld.q	SP, FRAME_R(24), r24
	ld.q	SP, FRAME_R(25), r25
	ld.q	SP, FRAME_R(26), r26
	ld.q	SP, FRAME_R(27), r27
	ld.q	SP, FRAME_R(28), r28
	ld.q	SP, FRAME_R(29), r29
	ld.q	SP, FRAME_R(30), r30
	ld.q	SP, FRAME_R(31), r31
	ld.q	SP, FRAME_R(32), r32
	ld.q	SP, FRAME_R(33), r33
	ld.q	SP, FRAME_R(34), r34
	ld.q	SP, FRAME_R(35), r35
	ld.q	SP, FRAME_R(36), r36
	ld.q	SP, FRAME_R(37), r37
	ld.q	SP, FRAME_R(38), r38
	ld.q	SP, FRAME_R(39), r39
	ld.q	SP, FRAME_R(40), r40
	ld.q	SP, FRAME_R(41), r41
	ld.q	SP, FRAME_R(42), r42
	ld.q	SP, FRAME_R(43), r43
	ld.q	SP, FRAME_R(44), r44
	ld.q	SP, FRAME_R(45), r45
	ld.q	SP, FRAME_R(46), r46
	ld.q	SP, FRAME_R(47), r47
	ld.q	SP, FRAME_R(48), r48
	ld.q	SP, FRAME_R(49), r49
	ld.q	SP, FRAME_R(50), r50
	ld.q	SP, FRAME_R(51), r51
	ld.q	SP, FRAME_R(52), r52
	ld.q	SP, FRAME_R(53), r53
	ld.q	SP, FRAME_R(54), r54
	ld.q	SP, FRAME_R(55), r55
	ld.q	SP, FRAME_R(56), r56
	ld.q	SP, FRAME_R(57), r57
	ld.q	SP, FRAME_R(58), r58

	getcon	SR, r59
	movi	SR_BLOCK_EXC, r60
	or	r59, r60, r59
	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
	ld.q	SP, FRAME_S(FSSR), r61
	ld.q	SP, FRAME_S(FSPC), r62
	movi	SR_ASID_MASK, r60
	and	r59, r60, r59		/* r59 = current ASID from live SR */
	andc	r61, r60, r61		/* Clear out older ASID */
	or	r59, r61, r61		/* Retain current ASID */
	putcon	r61, SSR
	putcon	r62, SPC

	/* Ignore FSYSCALL_ID */

	ld.q	SP, FRAME_R(59), r59
	ld.q	SP, FRAME_R(60), r60
	ld.q	SP, FRAME_R(61), r61
	ld.q	SP, FRAME_R(62), r62

	/* Last touch */
	ld.q	SP, FRAME_R(15), SP
	rte
	nop
 | 1084 |  | 
 | 1085 | /* | 
 | 1086 |  * Third level handlers for VBR-based exceptions. Adapting args to | 
 | 1087 |  * and/or deflecting to fourth level handlers. | 
 | 1088 |  * | 
 | 1089 |  * Fourth level handlers interface. | 
 | 1090 |  * Most are C-coded handlers directly pointed by the trap_jtable. | 
 | 1091 |  * (Third = Fourth level) | 
 | 1092 |  * Inputs: | 
 | 1093 |  * (r2)   fault/interrupt code, entry number (e.g. NMI = 14, | 
 | 1094 |  *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...) | 
 | 1095 |  * (r3)   struct pt_regs *, original register's frame pointer | 
 | 1096 |  * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault) | 
 | 1097 |  * (r5)   TRA control register (for syscall/debug benefit only) | 
 | 1098 |  * (LINK) return address | 
 | 1099 |  * (SP)   = r3 | 
 | 1100 |  * | 
 | 1101 |  * Kernel TLB fault handlers will get a slightly different interface. | 
 | 1102 |  * (r2)   struct pt_regs *, original register's frame pointer | 
 | 1103 |  * (r3)   writeaccess, whether it's a store fault as opposed to load fault | 
 | 1104 |  * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault | 
 | 1105 |  * (r5)   Effective Address of fault | 
 | 1106 |  * (LINK) return address | 
 | 1107 |  * (SP)   = r2 | 
 | 1108 |  * | 
 | 1109 |  * fpu_error_or_IRQ? is a helper to deflect to the right cause. | 
 | 1110 |  * | 
 | 1111 |  */ | 
/*
 * TLB-miss third-level handlers.  Each builds the kernel TLB-fault
 * argument set (r2 = pt_regs *, r3 = writeaccess, r4 = execaccess,
 * r5 = faulting effective address from TEA) and jumps to do_page_fault.
 */
tlb_miss_load:
	or	SP, ZERO, r2
	or	ZERO, ZERO, r3		/* Read */
	or	ZERO, ZERO, r4		/* Data */
	getcon	TEA, r5			/* faulting effective address */
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0		/* unconditional branch */

tlb_miss_store:
	or	SP, ZERO, r2
	movi	1, r3			/* Write */
	or	ZERO, ZERO, r4		/* Data */
	getcon	TEA, r5			/* faulting effective address */
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0		/* unconditional branch */

/* ITLB miss shares an entry with interrupts: deflect to do_IRQ when the
 * event code says EVENT_INTERRUPT, otherwise treat as a text-fetch fault. */
itlb_miss_or_IRQ:
	pta	its_IRQ, tr0
	beqi/u	r4, EVENT_INTERRUPT, tr0
	or	SP, ZERO, r2
	or	ZERO, ZERO, r3		/* Read */
	movi	1, r4			/* Text */
	getcon	TEA, r5
	/* Fall through */

call_do_page_fault:
	movi	do_page_fault, r6
        ptabs	r6, tr0
        blink	tr0, ZERO		/* tail-jump; do_page_fault returns via LINK */
 | 1141 |  | 
/*
 * FPU-disabled exceptions share entries with interrupts: deflect to
 * do_IRQ when the event code is EVENT_INTERRUPT, otherwise dispatch to
 * the FPU state-restore handler (or do_exception_error without FPU
 * support).  The A/B variants correspond to the two trap entries.
 */
fpu_error_or_IRQA:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	do_fpu_state_restore, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO

fpu_error_or_IRQB:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	do_fpu_state_restore, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO

/* its_IRQ: common tail-jump into the generic interrupt handler. */
its_IRQ:
	movi	do_IRQ, r6
	ptabs	r6, tr0
	blink	tr0, ZERO
 | 1168 |  | 
 | 1169 | /* | 
 | 1170 |  * system_call/unknown_trap third level handler: | 
 | 1171 |  * | 
 | 1172 |  * Inputs: | 
 | 1173 |  * (r2)   fault/interrupt code, entry number (TRAP = 11) | 
 | 1174 |  * (r3)   struct pt_regs *, original register's frame pointer | 
 | 1175 |  * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault) | 
 | 1176 |  * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr) | 
 | 1177 |  * (SP)   = r3 | 
 | 1178 |  * (LINK) return address: ret_from_exception | 
 | 1179 |  * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7) | 
 | 1180 |  * | 
 | 1181 |  * Outputs: | 
 | 1182 |  * (*r3)  Syscall reply (Saved r2) | 
 | 1183 |  * (LINK) In case of syscall only it can be scrapped. | 
 | 1184 |  *        Common second level post handler will be ret_from_syscall. | 
 | 1185 |  *        Common (non-trace) exit point to that is syscall_ret (saving | 
 | 1186 |  *        result to r2). Common bad exit point is syscall_bad (returning | 
 | 1187 |  *        ENOSYS then saved to r2). | 
 | 1188 |  * | 
 | 1189 |  */ | 
 | 1190 |  | 
/*
 * unknown_trap: handler for traps that are not recognised syscalls.
 * Extracts the trap number from the saved r9 slot and calls
 * do_unknown_trapa(number), then resumes via syscall_ret.
 */
unknown_trap:
	/* Unknown Trap or User Trace */
	movi	do_unknown_trapa, r6
	ptabs	r6, tr0
        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
        andi    r2, 0x1ff, r2		/* r2 = syscall # */
	blink	tr0, LINK		/* call do_unknown_trapa, return below */

	pta	syscall_ret, tr0
	blink	tr0, ZERO
 | 1201 |  | 
        /* New syscall implementation */
/*
 * system_call: validate the TRA value (must look like 0x1yzzzz),
 * stash the full syscall ID in the frame, extract the syscall number
 * into r5, re-enable interrupts, and bounds-check the number against
 * NR_syscalls before dispatching via syscall_allowed.
 */
system_call:
	pta	unknown_trap, tr0
        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
        shlri   r4, 20, r4
	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */

        /* It's a system call */
	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */

	STI()

	pta	syscall_allowed, tr0
	movi	NR_syscalls - 1, r4	/* Last valid */
	bgeu/l	r4, r5, tr0		/* dispatch if number is in range */
 | 1218 |  | 
/*
 * syscall_bad: out-of-range syscall number — return -ENOSYS.
 * syscall_ret: common (non-trace) syscall exit.  Stores the return
 * value into the saved-r9 slot, advances the saved PC past the trapa
 * instruction, and resumes through ret_from_syscall.
 */
syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2		/* Fall-through */

	.global syscall_ret
syscall_ret:
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */

#ifdef CONFIG_POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	movi	evt_debug2, r5
	ori	r5, 1, r5		/* set bit 0: SHmedia target for ptabs */
	ptabs	r5, tr0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	tr0, LINK
#endif

	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
 | 1243 |  | 
 | 1244 |  | 
/*  A different return path for ret_from_fork, because we now need
 *  to call schedule_tail with the later kernels. Since prev is
 *  loaded into r2 by switch_to(), we can just call it straight away.
 */
 | 1249 |  | 
/*
 * ret_from_fork: first code run by a newly forked task.  Calls
 * schedule_tail(prev) — prev is already in r2 courtesy of switch_to()
 * — then performs the standard syscall exit (advance saved PC past the
 * trapa, resume via ret_from_syscall).
 */
.global	ret_from_fork
ret_from_fork:

	movi	schedule_tail,r5
	ori	r5, 1, r5		/* set bit 0: SHmedia target for ptabs */
	ptabs	r5, tr0
	blink	tr0, LINK		/* call schedule_tail(prev in r2) */

#ifdef CONFIG_POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	movi	evt_debug2, r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	tr0, LINK
#endif

	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
 | 1274 |  | 
 | 1275 |  | 
 | 1276 |  | 
/*
 * syscall_allowed: dispatch a validated syscall number (r5).
 * Default exit is syscall_ret; when TIF_SYSCALL_TRACE is set,
 * syscall_trace is called before the syscall and the exit is deflected
 * to syscall_ret_trace.  Falls through into syscall_notrace, which
 * indexes sys_call_table, reloads the original user args from the
 * frame, and tail-jumps to the handler.
 */
syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	pta	syscall_ret, tr0
	gettr	tr0, LINK
	pta	syscall_notrace, tr0

	getcon	KCR0, r2		/* r2 = current_thread_info */
	ld.l	r2, TI_FLAGS, r4
	movi	(1 << TIF_SYSCALL_TRACE), r6
	and	r6, r4, r6
	beq/l	r6, ZERO, tr0		/* not traced: straight to dispatch */

	/* Trace it by calling syscall_trace before and after */
	movi	syscall_trace, r4
	ptabs	r4, tr0
	blink	tr0, LINK
	/* Reload syscall number as r5 is trashed by syscall_trace */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5

	pta	syscall_ret_trace, tr0
	gettr	tr0, LINK		/* traced exit point */

syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	movi	sys_call_table, r4
	shlli	r5, 2, r5		/* 4-byte table entries */
	ldx.l	r4, r5, r5
	ptabs	r5, tr0

	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7

	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8

	/* Call it */
	blink	tr0, ZERO	/* LINK is already properly set */
 | 1320 |  | 
/*
 * syscall_ret_trace: traced-syscall exit.  Save the return value,
 * call syscall_trace again (post-syscall), advance the saved PC past
 * the trapa, and resume the normal return sequence.
 */
syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */

	movi	syscall_trace, LINK
	ptabs	LINK, tr0
	blink	tr0, LINK		/* post-syscall trace hook */

	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2

	pta	ret_from_syscall, tr0
	blink	tr0, ZERO		/* Resume normal return sequence */
 | 1336 |  | 
 | 1337 | /* | 
 | 1338 |  * --- Switch to running under a particular ASID and return the previous ASID value | 
 | 1339 |  * --- The caller is assumed to have done a cli before calling this. | 
 | 1340 |  * | 
 | 1341 |  * Input r2 : new ASID | 
 | 1342 |  * Output r2 : old ASID | 
 | 1343 |  */ | 
 | 1344 |  | 
/*
 * switch_and_save_asid(new) — install a new ASID in SR and return the
 * old one.  The SR update is made effective with an SSR/SPC + rte
 * sequence to a local label.  Caller must have interrupts disabled.
 * In:  r2 = new ASID.  Out: r2 = old ASID.
 */
	.global switch_and_save_asid
switch_and_save_asid:
	getcon	sr, r0
	movi	255, r4
	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
	and	r0, r4, r3	/* r3 = shifted old ASID */
	andi	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc	r0, r4, r0	/* efface old ASID from SR */
	or	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr
	movi	1f, r0
	putcon	r0, spc
	rte			/* make new SR (with new ASID) live */
	nop
1:
	ptabs	LINK, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink tr0, r63		/* return */
 | 1364 |  | 
/*
 * route_to_panic_handler: drop to real mode (clear SR bit 31, the MMU
 * enable) via an SSR/SPC + rte sequence, then jump to panic_handler at
 * its physical address.  Does not return.
 */
	.global	route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	   last-chance debugging, e.g. if no output wants to go to the console.
	   */

	movi	panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1 /* physical addr */
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc		/* rte resumes at 1: below */
	getcon	sr, r0
	movi	1, r1
	shlli	r1, 31, r1	/* r1 = SR bit 31 (MMU enable) */
	andc	r0, r1, r0	/* clear it: real mode after rte */
	putcon	r0, ssr
	rte
	nop
1:	/* Now in real mode */
	blink tr0, r63		/* jump to panic_handler, no return */
	nop
 | 1386 |  | 
/*
 * u64 peek_real_address_q(real_addr): read one quadword from a physical
 * address by briefly switching to real mode with SR.BL set.
 * In: r2 = real-mode address.  Out: r2 = quadword read.
 */
	.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* r1 = SR.BL bit */
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36	/* r36 = SR bit 31 (MMU enable) */
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* enter .peek0 in real mode */
	nop

.peek0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* next rte restores original SR */
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63	/* return, result in r2 */
 | 1435 |  | 
/*
 * void poke_real_address_q(real_addr, val): write one quadword to a
 * physical address by briefly switching to real mode with SR.BL set.
 * In: r2 = real-mode address, r3 = quadword to write.
 */
	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* r1 = SR.BL bit */
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36	/* r36 = SR bit 31 (MMU enable) */
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* enter .poke0 in real mode */
	nop

.poke0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* next rte restores original SR */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63	/* return */
 | 1484 |  | 
 | 1485 | /* | 
 | 1486 |  * --- User Access Handling Section | 
 | 1487 |  */ | 
 | 1488 |  | 
 | 1489 | /* | 
 | 1490 |  * User Access support. It all moved to non inlined Assembler | 
 | 1491 |  * functions in here. | 
 | 1492 |  * | 
 | 1493 |  * __kernel_size_t __copy_user(void *__to, const void *__from, | 
 | 1494 |  *			       __kernel_size_t __n) | 
 | 1495 |  * | 
 | 1496 |  * Inputs: | 
 | 1497 |  * (r2)  target address | 
 | 1498 |  * (r3)  source address | 
 | 1499 |  * (r4)  size in bytes | 
 | 1500 |  * | 
 * Outputs:
 | 1502 |  * (*r2) target data | 
 | 1503 |  * (r2)  non-copied bytes | 
 | 1504 |  * | 
 | 1505 |  * If a fault occurs on the user pointer, bail out early and return the | 
 | 1506 |  * number of bytes not copied in r2. | 
 | 1507 |  * Strategy : for large blocks, call a real memcpy function which can | 
 | 1508 |  * move >1 byte at a time using unaligned ld/st instructions, and can | 
 | 1509 |  * manipulate the cache using prefetch + alloco to improve the speed | 
 | 1510 |  * further.  If a fault occurs in that function, just revert to the | 
 | 1511 |  * byte-by-byte approach used for small blocks; this is rare so the | 
 | 1512 |  * performance hit for that case does not matter. | 
 | 1513 |  * | 
 | 1514 |  * For small blocks it's not worth the overhead of setting up and calling | 
 | 1515 |  * the memcpy routine; do the copy a byte at a time. | 
 | 1516 |  * | 
 | 1517 |  */ | 
/*
 * __copy_user(to=r2, from=r3, n=r4) -> r2 = bytes NOT copied.
 * Small copies (n <= 16) go byte-by-byte; larger ones call
 * copy_user_memcpy, saving the arguments on the stack so
 * __copy_user_fixup can restart byte-by-byte if the fast path faults.
 */
	.global	__copy_user
__copy_user:
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1		! n <= 16: byte-by-byte
	pta copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS
 | 1543 |  | 
/*
 * __copy_user_fixup: exception-fixup entry used when copy_user_memcpy
 * faults.  Restores the saved arguments and stack, then falls through
 * to the byte-by-byte loop below, which will stop at the first faulting
 * byte and return the remaining count.
 */
	.global __copy_user_fixup
__copy_user_fixup:
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP,  8, r3
	ld.q	SP,  0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */

/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	sub	r2, r3, r0	/* r0 = dst - src - 1, so (src+1) + r0 = dst */
	addi	r0, -1, r0

___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4		/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5		/* Fault address 2 */
	bne     r4, ZERO, tr0

___copy_user_exit:
	or	r4, ZERO, r2		/* return bytes not copied */
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1579 |  | 
 | 1580 | /* | 
 | 1581 |  * __kernel_size_t __clear_user(void *addr, __kernel_size_t size) | 
 | 1582 |  * | 
 | 1583 |  * Inputs: | 
 | 1584 |  * (r2)  target address | 
 | 1585 |  * (r3)  size in bytes | 
 | 1586 |  * | 
 * Outputs:
 | 1588 |  * (*r2) zero-ed target data | 
 | 1589 |  * (r2)  non-zero-ed bytes | 
 | 1590 |  */ | 
/*
 * __clear_user(addr=r2, size=r3) -> r2 = bytes NOT zeroed.
 * Zeroes user memory a byte at a time; a fault on the store leaves the
 * remaining count in r3 (fixup resumes at the exit label).
 */
	.global	__clear_user
__clear_user:
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1		/* early exit for zero length */

___clear_user1:
	st.b	r2, 0, ZERO		/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3		/* No real fixup required */
	bne     r3, ZERO, tr0

___clear_user_exit:
	or	r3, ZERO, r2		/* return bytes not zeroed */
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1607 |  | 
 | 1608 |  | 
 | 1609 | /* | 
 | 1610 |  * int __strncpy_from_user(unsigned long __dest, unsigned long __src, | 
 | 1611 |  *			   int __count) | 
 | 1612 |  * | 
 | 1613 |  * Inputs: | 
 | 1614 |  * (r2)  target address | 
 | 1615 |  * (r3)  source address | 
 | 1616 |  * (r4)  maximum size in bytes | 
 | 1617 |  * | 
 * Outputs:
 | 1619 |  * (*r2) copied data | 
 | 1620 |  * (r2)  -EFAULT (in case of faulting) | 
 | 1621 |  *       copied data (otherwise) | 
 | 1622 |  */ | 
/*
 * __strncpy_from_user(dest=r2, src=r3, count=r4) -> r2 = bytes copied,
 * or -EFAULT if the user-space read faults (fixup resumes at the exit
 * label with r6 still holding -EFAULT).
 */
	.global	__strncpy_from_user
__strncpy_from_user:
	pta	___strncpy_from_user1, tr0
	pta	___strncpy_from_user_done, tr1
	or	r4, ZERO, r5		/* r5 = original count */
	beq/u	r4, r63, tr1		/* early exit if r4==0 */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */

___strncpy_from_user1:
	ld.b	r3, 0, r7		/* Fault address: only in reading */
	st.b	r2, 0, r7
	addi	r2, 1, r2
	addi	r3, 1, r3
	beq/u	ZERO, r7, tr1		/* stop at NUL terminator */
	addi	r4, -1, r4		/* return real number of copied bytes */
	bne/l	ZERO, r4, tr0

___strncpy_from_user_done:
	sub	r5, r4, r6		/* If done, return copied */

___strncpy_from_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1648 |  | 
 | 1649 | /* | 
 | 1650 |  * extern long __strnlen_user(const char *__s, long __n) | 
 | 1651 |  * | 
 | 1652 |  * Inputs: | 
 | 1653 |  * (r2)  source address | 
 | 1654 |  * (r3)  source size in bytes | 
 | 1655 |  * | 
 * Outputs:
 | 1657 |  * (r2)  -EFAULT (in case of faulting) | 
 | 1658 |  *       string length (otherwise) | 
 | 1659 |  */ | 
/*
 * __strnlen_user(s=r2, n=r3) -> r2 = string length (excluding the NUL,
 * see note below), or -EFAULT if the user-space read faults (fixup
 * resumes at the exit label with r6 still holding -EFAULT).
 */
	.global	__strnlen_user
__strnlen_user:
	pta	___strnlen_user_set_reply, tr0
	pta	___strnlen_user1, tr1
	or	ZERO, ZERO, r5		/* r5 = counter */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
	beq	r3, ZERO, tr0		/* n == 0: return 0 */

___strnlen_user1:
	ldx.b	r2, r5, r7		/* Fault address: only in reading */
	addi	r3, -1, r3		/* No real fixup */
	addi	r5, 1, r5
	beq	r3, ZERO, tr0		/* limit reached */
	bne	r7, ZERO, tr1		/* not NUL yet: keep counting */
! The line below used to be active.  It led to a junk byte lying between each
! pair of entries in the argv & envp structures in memory.  Whilst the program
! saw the right data via the argv and envp arguments to main, it meant the
! 'flat' representation visible through /proc/$pid/cmdline was corrupt,
! causing trouble with ps, for example.
!	addi	r5, 1, r5		/* Include '\0' */

___strnlen_user_set_reply:
	or	r5, ZERO, r6		/* If done, return counter */

___strnlen_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1688 |  | 
 | 1689 | /* | 
 | 1690 |  * extern long __get_user_asm_?(void *val, long addr) | 
 | 1691 |  * | 
 | 1692 |  * Inputs: | 
 | 1693 |  * (r2)  dest address | 
 | 1694 |  * (r3)  source address (in User Space) | 
 | 1695 |  * | 
 * Outputs:
 | 1697 |  * (r2)  -EFAULT (faulting) | 
 | 1698 |  *       0 	 (not faulting) | 
 | 1699 |  */ | 
/*
 * __get_user_asm_b(val=r2, addr=r3) -> r2 = 0 or -EFAULT.
 * Fetch one byte from user space into *val.  The load at
 * ___get_user_asm_b1 is the faulting instruction; on a fault the fixup
 * skips to the exit with r2 still holding -EFAULT.
 */
	.global	__get_user_asm_b
__get_user_asm_b:
	or	r2, ZERO, r4		/* r4 = kernel dest pointer */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5		/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1713 |  | 
 | 1714 |  | 
/*
 * __get_user_asm_w(val=r2, addr=r3) -> r2 = 0 or -EFAULT.
 * Word (16-bit) variant of __get_user_asm_b; same fault protocol.
 */
	.global	__get_user_asm_w
__get_user_asm_w:
	or	r2, ZERO, r4		/* r4 = kernel dest pointer */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5		/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1728 |  | 
 | 1729 |  | 
/*
 * __get_user_asm_l(val=r2, addr=r3) -> r2 = 0 or -EFAULT.
 * Longword (32-bit) variant of __get_user_asm_b; same fault protocol.
 */
	.global	__get_user_asm_l
__get_user_asm_l:
	or	r2, ZERO, r4		/* r4 = kernel dest pointer */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5		/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1743 |  | 
 | 1744 |  | 
/*
 * __get_user_asm_q(val=r2, addr=r3) -> r2 = 0 or -EFAULT.
 * Quadword (64-bit) variant of __get_user_asm_b; same fault protocol.
 */
	.global	__get_user_asm_q
__get_user_asm_q:
	or	r2, ZERO, r4		/* r4 = kernel dest pointer */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5		/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 1758 |  | 
 | 1759 | /* | 
 | 1760 |  * extern long __put_user_asm_?(void *pval, long addr) | 
 | 1761 |  * | 
 | 1762 |  * Inputs: | 
 | 1763 |  * (r2)  kernel pointer to value | 
 | 1764 |  * (r3)  dest address (in User Space) | 
 | 1765 |  * | 
 | 1766 |  * Ouputs: | 
 | 1767 |  * (r2)  -EFAULT (faulting) | 
 | 1768 |  *       0 	 (not faulting) | 
 | 1769 |  */ | 
 | 1770 | 	.global	__put_user_asm_b | 
 | 1771 | __put_user_asm_b: | 
 | 1772 | 	ld.b	r2, 0, r4		/* r4 = data */ | 
 | 1773 | 	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */ | 
 | 1774 |  | 
 | 1775 | ___put_user_asm_b1: | 
 | 1776 | 	st.b	r3, 0, r4 | 
 | 1777 | 	or	ZERO, ZERO, r2 | 
 | 1778 |  | 
 | 1779 | ___put_user_asm_b_exit: | 
 | 1780 | 	ptabs	LINK, tr0 | 
 | 1781 | 	blink	tr0, ZERO | 
 | 1782 |  | 
 | 1783 |  | 
.global	__put_user_asm_w
__put_user_asm_w:
	/* Word (16-bit) put_user: (r2) kernel pointer to value, (r3) user dest.
	   Returns 0 in r2 on success, -EFAULT if the user store faults. */
	ld.w	r2, 0, r4		/* r4 = data (kernel-side load; may not fault) */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

	/* __ex_table fixup: a fault at ___put_user_asm_w1 resumes at
	   ___put_user_asm_w_exit with r2 still -EFAULT. */
___put_user_asm_w1:
	st.w	r3, 0, r4		/* store word to user space */
	or	ZERO, ZERO, r2		/* no fault: return 0 */

___put_user_asm_w_exit:
	ptabs	LINK, tr0		/* return to caller via LINK */
	blink	tr0, ZERO
 | 1796 |  | 
 | 1797 |  | 
.global	__put_user_asm_l
__put_user_asm_l:
	/* Longword (32-bit) put_user: (r2) kernel pointer to value, (r3) user dest.
	   Returns 0 in r2 on success, -EFAULT if the user store faults. */
	ld.l	r2, 0, r4		/* r4 = data (kernel-side load; may not fault) */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

	/* __ex_table fixup: a fault at ___put_user_asm_l1 resumes at
	   ___put_user_asm_l_exit with r2 still -EFAULT. */
___put_user_asm_l1:
	st.l	r3, 0, r4		/* store longword to user space */
	or	ZERO, ZERO, r2		/* no fault: return 0 */

___put_user_asm_l_exit:
	ptabs	LINK, tr0		/* return to caller via LINK */
	blink	tr0, ZERO
 | 1810 |  | 
 | 1811 |  | 
.global	__put_user_asm_q
__put_user_asm_q:
	/* Quadword (64-bit) put_user: (r2) kernel pointer to value, (r3) user dest.
	   Returns 0 in r2 on success, -EFAULT if the user store faults. */
	ld.q	r2, 0, r4		/* r4 = data (kernel-side load; may not fault) */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

	/* __ex_table fixup: a fault at ___put_user_asm_q1 resumes at
	   ___put_user_asm_q_exit with r2 still -EFAULT. */
___put_user_asm_q1:
	st.q	r3, 0, r4		/* store quadword to user space */
	or	ZERO, ZERO, r2		/* no fault: return 0 */

___put_user_asm_q_exit:
	ptabs	LINK, tr0		/* return to caller via LINK */
	blink	tr0, ZERO
 | 1824 |  | 
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0  is at resvec_saved_area + 0
	   former r1  is at resvec_saved_area + 8
	   former tr0 is at resvec_saved_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	/* SP currently points at resvec_saved_area (former r0 is loaded
	   from offset 0 below).  Dump layout at r0: slots of 8 bytes,
	   r0..r63 at 0x000-0x1f8, tr0..tr7 at 0x200-0x238, then control
	   registers from 0x240 onwards. */
	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1	! former r0
	st.q	r0,  0x000, r1
	ld.q	SP, 0x008, r1	! former r1
	st.q	r0,  0x008, r1
	st.q	r0,  0x010, r2
	st.q	r0,  0x018, r3
	st.q	r0,  0x020, r4
	st.q	r0,  0x028, r5
	st.q	r0,  0x030, r6
	st.q	r0,  0x038, r7
	st.q	r0,  0x040, r8
	st.q	r0,  0x048, r9
	st.q	r0,  0x050, r10
	st.q	r0,  0x058, r11
	st.q	r0,  0x060, r12
	st.q	r0,  0x068, r13
	st.q	r0,  0x070, r14	! r14 saved BEFORE being reused as scratch below
	getcon	dcr, r14	! former r15 (SP) was stashed in DCR on entry
	st.q	r0,  0x078, r14	! r15 slot = former SP
	st.q	r0,  0x080, r16
	st.q	r0,  0x088, r17
	st.q	r0,  0x090, r18
	st.q	r0,  0x098, r19
	st.q	r0,  0x0a0, r20
	st.q	r0,  0x0a8, r21
	st.q	r0,  0x0b0, r22
	st.q	r0,  0x0b8, r23
	st.q	r0,  0x0c0, r24
	st.q	r0,  0x0c8, r25
	st.q	r0,  0x0d0, r26
	st.q	r0,  0x0d8, r27
	st.q	r0,  0x0e0, r28
	st.q	r0,  0x0e8, r29
	st.q	r0,  0x0f0, r30
	st.q	r0,  0x0f8, r31
	st.q	r0,  0x100, r32
	st.q	r0,  0x108, r33
	st.q	r0,  0x110, r34
	st.q	r0,  0x118, r35
	st.q	r0,  0x120, r36
	st.q	r0,  0x128, r37
	st.q	r0,  0x130, r38
	st.q	r0,  0x138, r39
	st.q	r0,  0x140, r40
	st.q	r0,  0x148, r41
	st.q	r0,  0x150, r42
	st.q	r0,  0x158, r43
	st.q	r0,  0x160, r44
	st.q	r0,  0x168, r45
	st.q	r0,  0x170, r46
	st.q	r0,  0x178, r47
	st.q	r0,  0x180, r48
	st.q	r0,  0x188, r49
	st.q	r0,  0x190, r50
	st.q	r0,  0x198, r51
	st.q	r0,  0x1a0, r52
	st.q	r0,  0x1a8, r53
	st.q	r0,  0x1b0, r54
	st.q	r0,  0x1b8, r55
	st.q	r0,  0x1c0, r56
	st.q	r0,  0x1c8, r57
	st.q	r0,  0x1d0, r58
	st.q	r0,  0x1d8, r59
	st.q	r0,  0x1e0, r60
	st.q	r0,  0x1e8, r61
	st.q	r0,  0x1f0, r62
	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...

	/* Target registers: tr0 was saved at entry (resvec_saved_area + 0x20),
	   tr1..tr7 are still live and read directly. */
	ld.q	SP, 0x020, r1  ! former tr0
	st.q	r0,  0x200, r1
	gettr	tr1, r1
	st.q	r0,  0x208, r1
	gettr	tr2, r1
	st.q	r0,  0x210, r1
	gettr	tr3, r1
	st.q	r0,  0x218, r1
	gettr	tr4, r1
	st.q	r0,  0x220, r1
	gettr	tr5, r1
	st.q	r0,  0x228, r1
	gettr	tr6, r1
	st.q	r0,  0x230, r1
	gettr	tr7, r1
	st.q	r0,  0x238, r1

	/* Control registers, dumped at 0x240.. in the order read below. */
	getcon	sr,  r1
	getcon	ssr,  r2
	getcon	pssr,  r3
	getcon	spc,  r4
	getcon	pspc,  r5
	getcon	intevt,  r6
	getcon	expevt,  r7
	getcon	pexpevt,  r8
	getcon	tra,  r9
	getcon	tea,  r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr,  r13
	getcon	resvec,  r14

	st.q	r0,  0x240, r1
	st.q	r0,  0x248, r2
	st.q	r0,  0x250, r3
	st.q	r0,  0x258, r4
	st.q	r0,  0x260, r5
	st.q	r0,  0x268, r6
	st.q	r0,  0x270, r7
	st.q	r0,  0x278, r8
	st.q	r0,  0x280, r9
	st.q	r0,  0x288, r10
	st.q	r0,  0x290, r11
	st.q	r0,  0x298, r12
	st.q	r0,  0x2a0, r13
	st.q	r0,  0x2a8, r14

	/* Reload SPC/SSR/EXPEVT into r2-r4 — presumably the argument
	   registers for panic_handler(); confirm against its C prototype. */
	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
	ori	r1, 1, r1	! bit 0 set = MMUOFF (cf. RESVEC setup in trap_init)
	ptabs   r1, tr0
	getcon	DCR, SP		! restore the former r15 as stack pointer
	blink	tr0, ZERO
	nop
	nop
	nop
	nop
 | 1968 |  | 
 | 1969 |  | 
 | 1970 |  | 
 | 1971 |  | 
 | 1972 | /* | 
 | 1973 |  * --- Signal Handling Section | 
 | 1974 |  */ | 
 | 1975 |  | 
 | 1976 | /* | 
 | 1977 |  * extern long long _sa_default_rt_restorer | 
 | 1978 |  * extern long long _sa_default_restorer | 
 | 1979 |  * | 
 | 1980 |  *		 or, better, | 
 | 1981 |  * | 
 | 1982 |  * extern void _sa_default_rt_restorer(void) | 
 | 1983 |  * extern void _sa_default_restorer(void) | 
 | 1984 |  * | 
 | 1985 |  * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn() | 
 | 1986 |  * from user space. Copied into user space by signal management. | 
 | 1987 |  * Both must be quad aligned and 2 quad long (4 instructions). | 
 | 1988 |  * | 
 | 1989 |  */ | 
.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* Default rt-signal trampoline, copied to user space: builds the
	   trap ID (0x10 in the upper half, __NR_rt_sigreturn in the lower
	   16 bits via shori) and traps into the kernel.  Must stay exactly
	   4 instructions (2 quads) — see the contract comment above. */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop
 | 1997 |  | 
.balign 8
	.global sa_default_restorer
sa_default_restorer:
	/* Default (non-rt) signal trampoline, copied to user space: same
	   shape as sa_default_rt_restorer but issues __NR_sigreturn.
	   Must stay exactly 4 instructions (2 quads). */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
 | 2005 |  | 
 | 2006 | /* | 
 | 2007 |  * --- __ex_table Section | 
 | 2008 |  */ | 
 | 2009 |  | 
 | 2010 | /* | 
 | 2011 |  * User Access Exception Table. | 
 | 2012 |  */ | 
 | 2013 | 	.section	__ex_table,  "a" | 
 | 2014 |  | 
 | 2015 | 	.global asm_uaccess_start	/* Just a marker */ | 
 | 2016 | asm_uaccess_start: | 
 | 2017 |  | 
 | 2018 | 	.long	___copy_user1, ___copy_user_exit | 
 | 2019 | 	.long	___copy_user2, ___copy_user_exit | 
 | 2020 | 	.long	___clear_user1, ___clear_user_exit | 
 | 2021 | 	.long	___strncpy_from_user1, ___strncpy_from_user_exit | 
 | 2022 | 	.long	___strnlen_user1, ___strnlen_user_exit | 
 | 2023 | 	.long	___get_user_asm_b1, ___get_user_asm_b_exit | 
 | 2024 | 	.long	___get_user_asm_w1, ___get_user_asm_w_exit | 
 | 2025 | 	.long	___get_user_asm_l1, ___get_user_asm_l_exit | 
 | 2026 | 	.long	___get_user_asm_q1, ___get_user_asm_q_exit | 
 | 2027 | 	.long	___put_user_asm_b1, ___put_user_asm_b_exit | 
 | 2028 | 	.long	___put_user_asm_w1, ___put_user_asm_w_exit | 
 | 2029 | 	.long	___put_user_asm_l1, ___put_user_asm_l_exit | 
 | 2030 | 	.long	___put_user_asm_q1, ___put_user_asm_q_exit | 
 | 2031 |  | 
 | 2032 | 	.global asm_uaccess_end		/* Just a marker */ | 
 | 2033 | asm_uaccess_end: | 
 | 2034 |  | 
 | 2035 |  | 
 | 2036 |  | 
 | 2037 |  | 
 | 2038 | /* | 
 | 2039 |  * --- .text.init Section | 
 | 2040 |  */ | 
 | 2041 |  | 
.section	.text.init, "ax"

/*
 * void trap_init (void)
 *
 * Install the exception vector bases: VBR (virtual, MMU on) and RESVEC
 * (physical, MMU forced off), then sanity-check that the handler block
 * fits in BLOCK_SIZE and finally clear SR.BL to unblock exceptions.
 */
	.global	trap_init
trap_init:
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	movi	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	movi	LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	movi	LVBR_block_end, r21
	andi	r21, -4, r21			/* r21 = actual end of VBR block */
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30			/* r30 = block start */
	add	r19, r29, r19			/* r19 = expected end (start + BLOCK_SIZE) */

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	pta	trap_init_loop, tr1
	gettr	tr1, r28			/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, tr1			/* size mismatch: spin here forever */

	/* Now that exception vectors are set up reset SR.BL */
	getcon 	SR, r22
	movi	SR_UNBLOCK_EXC, r23		/* mask with BL bit clear */
	and	r22, r23, r22
	putcon	r22, SR

	/* Restore saved registers and return to caller */
	addi	SP, 24, SP
	ptabs	LINK, tr0
	blink	tr0, ZERO
 | 2102 |  |