/* arch/arm26/kernel/entry.S
 *
 * Assembled from chunks of code in arch/arm
 *
 * Copyright (C) 2003 Ian Molton
 * Based on the work of RMK.
 *
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm_offsets.h>
#include <asm/errno.h>
#include <asm/hardware.h>
#include <asm/sysirq.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

	.macro	zero_fp
#ifndef CONFIG_NO_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

	.text

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@ OS version number used in SWIs
@  RISC OS is 0
@  RISC iX is 8
@
#define OS_NUMBER	9
#define ARMSWI_OFFSET	0x000f0000

@
@ Stack format (ensured by USER_* and SVC_*)
@ PSR and PC are combined on arm26
@

#define S_OFF		8

#define S_OLD_R0	64
#define S_PC		60
#define S_LR		56
#define S_SP		52
#define S_IP		48
#define S_FP		44
#define S_R10		40
#define S_R9		36
#define S_R8		32
#define S_R7		28
#define S_R6		24
#define S_R5		20
#define S_R4		16
#define S_R3		12
#define S_R2		8
#define S_R1		4
#define S_R0		0
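
@ These offsets describe the saved-register frame built by the macros
@ below: r0-r14 at offsets 0-56, the combined user pc/psr word at S_PC,
@ and the original (pre-syscall) r0 at S_OLD_R0.  S_OFF is the extra
@ 8 bytes the SWI handler pushes on top of the frame to hold the fifth
@ and sixth syscall arguments.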

	.macro	save_user_regs
	str	r0, [sp, #-4]!   @ Store SVC r0
	str	lr, [sp, #-4]!   @ Store user mode PC
	sub	sp, sp, #15*4
	stmia	sp, {r0 - lr}^   @ Store the other user-mode regs
	mov	r0, r0
	.endm
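
@ save_user_regs builds the frame described by the S_* offsets above:
@ the incoming r0 goes in the S_OLD_R0 slot, lr (which at this point
@ holds the user pc with the psr folded into it, as usual on 26-bit
@ ARM) goes in the S_PC slot, and the stm with ^ stores the user-mode
@ r0-lr below them.  The trailing mov r0, r0 is the customary no-op
@ after a user-bank stm/ldm.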

	.macro	slow_restore_user_regs
	ldmia	sp, {r0 - lr}^   @ restore the user regs not including PC
	mov	r0, r0
	ldr	lr, [sp, #15*4]  @ get user PC
	add	sp, sp, #15*4+8  @ free stack
	movs	pc, lr           @ return
	.endm

	.macro	fast_restore_user_regs
	add	sp, sp, #S_OFF
	ldmib	sp, {r1 - lr}^
	mov	r0, r0
	ldr	lr, [sp, #15*4]
	add	sp, sp, #15*4+8
	movs	pc, lr
	.endm

	.macro	save_svc_regs
	str     sp, [sp, #-16]!
	str     lr, [sp, #8]
	str     lr, [sp, #4]
	stmfd   sp!, {r0 - r12}
	mov     r0, #-1
	str     r0, [sp, #S_OLD_R0]
	zero_fp
	.endm

	.macro	save_svc_regs_irq
	str     sp, [sp, #-16]!
	str     lr, [sp, #4]
	ldr     lr, .LCirq
	ldr     lr, [lr]
	str     lr, [sp, #8]
	stmfd   sp!, {r0 - r12}
	mov     r0, #-1
	str     r0, [sp, #S_OLD_R0]
	zero_fp
	.endm

	.macro	restore_svc_regs
	ldmfd   sp, {r0 - pc}^
	.endm
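
@ The save_svc_regs* macros build the same style of frame for traps
@ taken in SVC mode: r0-r12 at the bottom, the SVC sp, the return
@ pc/psr (from lr) filling both the lr and pc slots, and -1 in the
@ S_OLD_R0 slot to mark "not a syscall".  save_svc_regs_irq differs
@ only in taking the return pc from the lr_irq value stashed at
@ __temp_irq.  restore_svc_regs undoes it with one ldm; the ^ on a
@ load that includes pc also restores the psr bits on 26-bit ARM.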

	.macro	mask_pc, rd, rm
	bic	\rd, \rm, #PCMASK
	.endm
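
@ On 26-bit ARM the PSR (flags, I/F bits and mode) shares r15 with the
@ program counter.  PCMASK covers those PSR bits, so mask_pc turns a
@ saved r15/lr value into a plain instruction address.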

	.macro  disable_irqs, temp
	mov     \temp, pc
	orr     \temp, \temp, #PSR_I_BIT
	teqp    \temp, #0
	.endm

	.macro	enable_irqs, temp
	mov     \temp, pc
	and     \temp, \temp, #~PSR_I_BIT
	teqp	\temp, #0
	.endm
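
@ There is no separate status register to write on 26-bit ARM: teq with
@ the P suffix (teqp) writes the ALU result into the PSR bits of r15.
@ These macros therefore read the current pc/psr, set or clear
@ PSR_I_BIT in a scratch register, and teqp the result with #0 to
@ install the new PSR.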

	.macro	initialise_traps_extra
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm
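
@ The thread_info lives at the bottom of the 8K kernel stack, so
@ clearing the low 13 bits of sp (shift down, shift back up) yields its
@ address.  For example, any sp between 0x02080000 and 0x02081fff would
@ give a thread_info pointer of 0x02080000 (addresses illustrative only).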

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info

/*
 * Get the system call number.
 */
	.macro	get_scno
	mask_pc	lr, lr
	ldr	scno, [lr, #-4]		@ get SWI instruction
	.endm
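
@ At SWI entry lr points just past the SWI instruction, so (once the
@ PSR bits are masked off) [lr, #-4] is the instruction itself.  Its
@ low 24 bits carry the SWI number; Linux system calls are issued as
@ swi (OS_NUMBER << 20) + NR, i.e. 0x900000 + NR.
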
/*
 *  -----------------------------------------------------------------------
 */

/*
 * We rely on the fact that R0 is at the bottom of the stack (due to
 * slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
	disable_irqs r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
	fast_restore_user_regs

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	disable_irqs r1				@ disable interrupts
	b	no_work_pending

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irqs r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	slow_restore_user_regs

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

// FIXME - is this strictly necessary?
#include "calls.S"

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	save_user_regs
	zero_fp
	get_scno

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irqs ip

	str	r4, [sp, #-S_OFF]!		@ push fifth arg

	get_thread_info tsk
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #OS_NUMBER << 20	@ check OS number
	adr	tbl, sys_call_table		@ load syscall table pointer
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	adral	lr, ret_fast_syscall            @ set return address
	orral	lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC mode on return
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #ARMSWI_OFFSET
	eor	r0, scno, #OS_NUMBER << 20	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
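
@ After the bic/eor above a genuine Linux syscall leaves a small index
@ in scno, which the ldrcc dispatches through sys_call_table with the
@ return address set to ret_fast_syscall.  Out-of-range numbers fall
@ through to here: anything at or above ARMSWI_OFFSET is handed to
@ arm_syscall with the original SWI number restored in r0, anything
@ else gets sys_ni_syscall.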

/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adral   lr, __sys_trace_return          @ set return address
	orral   lr, lr, #PSR_I_BIT | MODE_SVC26 @ Force SVC mode on return
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r5 = syscall table
	.type	sys_syscall, #function
sys_syscall:
	eor	scno, r0, #OS_NUMBER << 20
	cmp	scno, #NR_syscalls	@ check range
	stmleia	sp, {r5, r6}		@ shuffle args
	movle	r0, r1
	movle	r1, r2
	movle	r2, r3
	movle	r3, r4
	ldrle	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
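
@ sys_syscall implements syscall(2): the real syscall number arrives in
@ r0, so the arguments shuffle down one register (r1-r4 become r0-r3)
@ and r5/r6 are stored into the two stacked argument slots to serve as
@ the fifth and sixth arguments of the routine being called.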

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve

sys_clone_wapper:
	add	r2, sp, #S_OFF
	b	sys_clone

sys_sigsuspend_wrapper:
	add	r3, sp, #S_OFF
	b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
	add	r2, sp, #S_OFF
	b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	sys_sigreturn

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.  FIXME - this lost some stuff from arm32 to
 * ifdefs. check it out.
 */
sys_mmap2:
	tst	r5, #((1 << (PAGE_SHIFT - 12)) - 1)
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	do_mmap2
	mov	r0, #-EINVAL
	RETINSTR(mov,pc, lr)
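
@ The offset arrives in 4K units but do_mmap2 wants it in whole pages,
@ so it must be a multiple of PAGE_SIZE/4K: the code checks the low
@ (PAGE_SHIFT - 12) bits, scales the value down, and rewrites the
@ stacked sixth argument at [sp, #4] before tail-calling do_mmap2.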

/*
 *  Design issues:
 *   - We have several modes that each vector can be called from,
 *     each with its own set of registers.  On entry to any vector,
 *     we *must* save the registers used in *that* mode.
 *
 *   - This code must be as fast as possible.
 *
 *  There are a few restrictions on the vectors:
 *   - the SWI vector cannot be called from *any* non-user mode
 *
 *   - the FP emulator is *never* called from *any* non-user mode undefined
 *     instruction.
 *
 */

	.text

	.macro handle_irq
1:	mov     r4, #IOC_BASE
	ldrb    r6, [r4, #0x24]            @ get high priority first
	adr     r5, irq_prio_h
	teq     r6, #0
	ldreqb  r6, [r4, #0x14]            @ get low priority
	adreq   r5, irq_prio_l

	teq     r6, #0                     @ If an IRQ happened...
	ldrneb  r0, [r5, r6]               @ get IRQ number
	movne   r1, sp                     @ get struct pt_regs
	adrne   lr, 1b                     @ Set return address to 1b
	orrne   lr, lr, #PSR_I_BIT | MODE_SVC26  @ (and force SVC mode)
	bne     asm_do_IRQ                 @ process IRQ (if asserted)
	.endm
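
@ handle_irq reads the IOC interrupt request registers (the
@ high-priority bank first, then the low-priority one), uses the
@ request byte to index irq_prio_h/irq_prio_l and so pick the
@ highest-priority pending IRQ number, then calls asm_do_IRQ with lr
@ pointing back at 1b so the loop repeats until no requests remain.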


/*
 * Interrupt table (incorporates priority)
 */
	.macro	irq_prio_table
irq_prio_l:	.byte	 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
irq_prio_h:	.byte	 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
	.endm

#if 1
/*
 * Uncomment these if you wish to get more debugging info about data aborts.
 * FIXME - I bet we can find a way to encode these and keep performance.
 */
#define FAULT_CODE_LDRSTRPOST	0x80
#define FAULT_CODE_LDRSTRPRE	0x40
#define FAULT_CODE_LDRSTRREG	0x20
#define FAULT_CODE_LDMSTM	0x10
#define FAULT_CODE_LDCSTC	0x08
#endif
#define FAULT_CODE_PREFETCH	0x04
#define FAULT_CODE_WRITE	0x02
#define FAULT_CODE_FORCECOW	0x01

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 */
_unexp_fiq:	ldr     sp, .LCfiq
	mov	r12, #IOC_BASE
	strb	r12, [r12, #0x38]	@ Disable FIQ register
	teqp	pc, #PSR_I_BIT | PSR_F_BIT | MODE_SVC26
	mov	r0, r0
	stmfd	sp!, {r0 - r3, ip, lr}
	adr	r0, Lfiqmsg
	bl	printk
	ldmfd	sp!, {r0 - r3, ip, lr}
	teqp	pc, #PSR_I_BIT | PSR_F_BIT | MODE_FIQ26
	mov	r0, r0
	movs	pc, lr

Lfiqmsg:	.ascii	"*** Unexpected FIQ\n\0"
	.align

.LCfiq:		.word	__temp_fiq
.LCirq:		.word	__temp_irq

/*=============================================================================
 * Undefined instruction handler
 *-----------------------------------------------------------------------------
 * Handles floating point instructions
 */
vector_undefinstr:
	tst	lr, #MODE_SVC26          @ did we come from a non-user mode?
	bne	__und_svc                @ yes - deal with it.
/* Otherwise, fall through for the user-space (common) case. */
	save_user_regs
	zero_fp                                 @ zero frame pointer
	teqp	pc, #PSR_I_BIT | MODE_SVC26     @ disable IRQs
.Lbug_undef:
	ldr	r4, .LC2
	ldr     pc, [r4]         @ Call FP module entry point
/* FIXME - should we trap for a null pointer here? */
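
@ .LC2 points at fp_enter (the .data word near the end of this file).
@ It starts out as fpe_not_present and is expected to be replaced with
@ the FP emulator's entry point once an emulator is available, so the
@ indirect load above jumps into the emulator for user-mode undefined
@ instructions.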

/* The SVC mode case */
__und_svc:	save_svc_regs                           @ Non-user mode
	mask_pc r0, lr
	and     r2, lr, #3
	sub     r0, r0, #4
	mov     r1, sp
	bl      do_undefinstr
	restore_svc_regs

/* We get here if the FP emulator doesn't handle the undef instr.
 * If the insn WAS handled, the emulator jumps to ret_from_exception by itself.
 */
	.globl	fpundefinstr
fpundefinstr:
	mov	r0, lr
	mov	r1, sp
	teqp	pc, #MODE_SVC26
	bl	do_undefinstr
	b	ret_from_exception		@ Normal FP exit

#if defined CONFIG_FPE_NWFPE || defined CONFIG_FPE_FASTFPE
/* The FPE is always present */
	.equ	fpe_not_present, 0
#else
/* We get here if an undefined instruction happens and the floating
 * point emulator is not present.  If the offending instruction was
 * a WFS, we just perform a normal return as if we had emulated the
 * operation.  This is a hack to allow some basic userland binaries
 * to run so that the emulator module proper can be loaded. --philb
 * FIXME - probably a broken useless hack...
 */
fpe_not_present:
	adr	r10, wfs_mask_data
	ldmia	r10, {r4, r5, r6, r7, r8}
	ldr	r10, [sp, #S_PC]		@ Load PC
	sub	r10, r10, #4
	mask_pc	r10, r10
	ldrt	r10, [r10]			@ get instruction
	and	r5, r10, r5
	teq	r5, r4				@ Is it WFS?
	beq	ret_from_exception
	and	r5, r10, r8
	teq	r5, r6				@ Is it LDF/STF on sp or fp?
	teqne	r5, r7
	bne	fpundefinstr
	tst	r10, #0x00200000		@ Does it have WB
	beq	ret_from_exception
	and	r4, r10, #255			@ get offset
	and	r6, r10, #0x000f0000
	tst	r10, #0x00800000		@ +/-
	ldr	r5, [sp, r6, lsr #14]		@ Load reg
	rsbeq	r4, r4, #0
	add	r5, r5, r4, lsl #2
	str	r5, [sp, r6, lsr #14]		@ Save reg
	b	ret_from_exception

wfs_mask_data:	.word	0x0e200110			@ WFS/RFS
		.word	0x0fef0fff
		.word	0x0d0d0100			@ LDF [sp]/STF [sp]
		.word	0x0d0b0100			@ LDF [fp]/STF [fp]
		.word	0x0f0f0f00
#endif

.LC2:		.word	fp_enter

/*=============================================================================
 * Prefetch abort handler
 *-----------------------------------------------------------------------------
 */
#define DEBUG_UNDEF
/* remember: lr = USR pc */
vector_prefetch:
	sub	lr, lr, #4
	tst	lr, #MODE_SVC26
	bne	__pabt_invalid
	save_user_regs
	teqp	pc, #MODE_SVC26         @ Enable IRQs...
	mask_pc	r0, lr			@ Address of abort
	mov	r1, sp			@ Tasks registers
	bl	do_PrefetchAbort
	teq	r0, #0			@ If non-zero, we believe this abort..
	bne	ret_from_exception
#ifdef DEBUG_UNDEF
	adr	r0, t
	bl	printk
#endif
	ldr	lr, [sp,#S_PC]		@ FIXME - need a program to test this on;
	b	.Lbug_undef		@ I think it's broken at the moment though!

__pabt_invalid:	save_svc_regs
	mov	r0, sp			@ Prefetch aborts are definitely *not*
	mov	r1, #BAD_PREFETCH	@ allowed in non-user modes.  We can't
	and	r2, lr, #3		@ recover from this problem.
	b	bad_mode

#ifdef DEBUG_UNDEF
t:		.ascii "*** undef ***\r\n\0"
	.align
#endif

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen).
 * In order to debug the reason for address exceptions in non-user modes,
 * we have to obtain all the registers so that we can see what's going on.
 */

vector_addrexcptn:
	sub	lr, lr, #8
	tst	lr, #3
	bne	Laddrexcptn_not_user
	save_user_regs
	teq	pc, #MODE_SVC26
	mask_pc	r0, lr			@ Point to instruction
	mov	r1, sp			@ Point to registers
	mov	r2, #0x400
	mov	lr, pc
	bl	do_excpt
	b	ret_from_exception

Laddrexcptn_not_user:
	save_svc_regs
	and	r2, lr, #3
	teq	r2, #3
	bne	Laddrexcptn_illegal_mode
	teqp	pc, #MODE_SVC26
	mask_pc	r0, lr
	mov	r1, sp
	orr	r2, r2, #0x400
	bl	do_excpt
	ldmia	sp, {r0 - lr}		@ I can't remember the reason I changed this...
	add	sp, sp, #15*4
	movs	pc, lr

Laddrexcptn_illegal_mode:
	mov	r0, sp
	str	lr, [sp, #-4]!
	orr	r1, r2, #PSR_I_BIT | PSR_F_BIT
	teqp	r1, #0			@ change into mode (won't be user mode)
	mov	r0, r0
	mov	r1, r8			@ Any register from r8 - r14 can be banked
	mov	r2, r9
	mov	r3, r10
	mov	r4, r11
	mov	r5, r12
	mov	r6, r13
	mov	r7, r14
	teqp	pc, #PSR_F_BIT | MODE_SVC26 @ back to svc
	mov	r0, r0
	stmfd	sp!, {r1-r7}
	ldmia	r0, {r0-r7}
	stmfd	sp!, {r0-r7}
	mov	r0, sp
	mov	r1, #BAD_ADDREXCPTN
	b	bad_mode

/*=============================================================================
 * Interrupt (IRQ) handler
 *-----------------------------------------------------------------------------
 * Note: if the IRQ was taken whilst in user mode, then *no* kernel routine
 * is running, so we do not have to save the SVC lr.
 *
 * Entered in IRQ mode.
 */

vector_IRQ:	ldr     sp, .LCirq         @ Setup some temporary stack
	sub     lr, lr, #4
	str     lr, [sp]           @ push return address

	tst     lr, #3
	bne	__irq_non_usr

__irq_usr:	teqp	pc, #PSR_I_BIT | MODE_SVC26     @ Enter SVC mode
	mov	r0, r0

	ldr	lr, .LCirq
	ldr	lr, [lr]           @ Restore lr for jump back to USR

	save_user_regs

	handle_irq

	mov	why, #0
	get_thread_info tsk
	b	ret_to_user

@ Place the IRQ priority table here so that the handle_irq macros above
@ and below here can access it.

	irq_prio_table

__irq_non_usr:	teqp	pc, #PSR_I_BIT | MODE_SVC26     @ Enter SVC mode
	mov	r0, r0

	save_svc_regs_irq

	and	r2, lr, #3
	teq	r2, #3
	bne	__irq_invalid                @ IRQ not from SVC mode

	handle_irq

	restore_svc_regs

__irq_invalid:	mov	r0, sp
	mov	r1, #BAD_IRQ
	b	bad_mode

/*=============================================================================
 * Data abort handler code
 *-----------------------------------------------------------------------------
 *
 * This handles both exceptions from user and SVC modes, computes the address
 *  range of the problem, and does any correction that is required.  It then
 *  calls the kernel data abort routine.
 *
 * This is where I wish that the ARM would tell you which address aborted.
 */

vector_data:	sub	lr, lr, #8		@ Correct lr
	tst	lr, #3
	bne	Ldata_not_user
	save_user_regs
	teqp	pc, #MODE_SVC26
	mask_pc	r0, lr
	bl	Ldata_do
	b	ret_from_exception

Ldata_not_user:
	save_svc_regs
	and	r2, lr, #3
	teq	r2, #3
	bne	Ldata_illegal_mode
	tst	lr, #PSR_I_BIT
	teqeqp	pc, #MODE_SVC26
	mask_pc	r0, lr
	bl	Ldata_do
	restore_svc_regs

Ldata_illegal_mode:
	mov	r0, sp
	mov	r1, #BAD_DATA
	b	bad_mode

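@ Ldata_do is entered with r0 holding the (masked) address of the
@ aborting instruction.  It keeps a pointer to the saved registers in
@ r3, loads the instruction, records whether it was a store
@ (FAULT_CODE_WRITE), and dispatches on bits 27-24 of the instruction
@ through the jump table below; each handler then recomputes the
@ address range the instruction touched from its base register and
@ addressing mode before calling do_DataAbort.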
Ldata_do:	mov	r3, sp
	ldr	r4, [r0]		@ Get instruction
	mov	r2, #0
	tst	r4, #1 << 20		@ Check to see if it is a write instruction
	orreq	r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction
	mov	r1, r4, lsr #22		@ Now branch to the relevant processing routine
	and	r1, r1, #15 << 2
	add	pc, pc, r1
	movs	pc, lr
	b	Ldata_unknown
	b	Ldata_unknown
	b	Ldata_unknown
	b	Ldata_unknown
	b	Ldata_ldrstr_post	@ ldr	rd, [rn], #m
	b	Ldata_ldrstr_numindex	@ ldr	rd, [rn, #m]	@ RegVal
	b	Ldata_ldrstr_post	@ ldr	rd, [rn], rm
	b	Ldata_ldrstr_regindex	@ ldr	rd, [rn, rm]
	b	Ldata_ldmstm		@ ldm*a	rn, <rlist>
	b	Ldata_ldmstm		@ ldm*b	rn, <rlist>
	b	Ldata_unknown
	b	Ldata_unknown
	b	Ldata_ldrstr_post	@ ldc	rd, [rn], #m	@ Same as ldr	rd, [rn], #m
	b	Ldata_ldcstc_pre	@ ldc	rd, [rn, #m]
	b	Ldata_unknown
Ldata_unknown:	@ Part of jumptable
	mov	r0, r1
	mov	r1, r4
	mov	r2, r3
	b	baddataabort

Ldata_ldrstr_post:
	mov	r0, r4, lsr #14		@ Get Rn
	and	r0, r0, #15 << 2	@ Mask out reg.
	teq	r0, #15 << 2
	ldr	r0, [r3, r0]		@ Get register
	biceq	r0, r0, #PCMASK
	mov	r1, r0
#ifdef FAULT_CODE_LDRSTRPOST
	orr	r2, r2, #FAULT_CODE_LDRSTRPOST
#endif
	b	do_DataAbort

Ldata_ldrstr_numindex:
	mov	r0, r4, lsr #14		@ Get Rn
	and	r0, r0, #15 << 2	@ Mask out reg.
	teq	r0, #15 << 2
	ldr	r0, [r3, r0]		@ Get register
	mov	r1, r4, lsl #20
	biceq	r0, r0, #PCMASK
	tst	r4, #1 << 23
	addne	r0, r0, r1, lsr #20
	subeq	r0, r0, r1, lsr #20
	mov	r1, r0
#ifdef FAULT_CODE_LDRSTRPRE
	orr	r2, r2, #FAULT_CODE_LDRSTRPRE
#endif
	b	do_DataAbort

Ldata_ldrstr_regindex:
	mov	r0, r4, lsr #14		@ Get Rn
	and	r0, r0, #15 << 2	@ Mask out reg.
	teq	r0, #15 << 2
	ldr	r0, [r3, r0]		@ Get register
	and	r7, r4, #15
	biceq	r0, r0, #PCMASK
	teq	r7, #15			@ Check for PC
	ldr	r7, [r3, r7, lsl #2]	@ Get Rm
	and	r8, r4, #0x60		@ Get shift types
	biceq	r7, r7, #PCMASK
	mov	r9, r4, lsr #7		@ Get shift amount
	and	r9, r9, #31
	teq	r8, #0
	moveq	r7, r7, lsl r9
	teq	r8, #0x20		@ LSR shift
	moveq	r7, r7, lsr r9
	teq	r8, #0x40		@ ASR shift
	moveq	r7, r7, asr r9
	teq	r8, #0x60		@ ROR shift
	moveq	r7, r7, ror r9
	tst	r4, #1 << 23
	addne	r0, r0, r7
	subeq	r0, r0, r7		@ Apply correction
	mov	r1, r0
#ifdef FAULT_CODE_LDRSTRREG
	orr	r2, r2, #FAULT_CODE_LDRSTRREG
#endif
	b	do_DataAbort

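@ For ldm/stm the faulting range depends on how many registers were in
@ the transfer list, so the code below first counts the set bits in the
@ low 16 bits of the instruction (a nibble-at-a-time population count
@ built on the 0x1111 mask), then derives the first and last transfer
@ addresses from the base register and the U/P/W bits, writing the base
@ back if the instruction requested writeback.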
Ldata_ldmstm:
	mov	r7, #0x11
	orr	r7, r7, r7, lsl #8
	and	r0, r4, r7
	and	r1, r4, r7, lsl #1
	add	r0, r0, r1, lsr #1
	and	r1, r4, r7, lsl #2
	add	r0, r0, r1, lsr #2
	and	r1, r4, r7, lsl #3
	add	r0, r0, r1, lsr #3
	add	r0, r0, r0, lsr #8
	add	r0, r0, r0, lsr #4
	and	r7, r0, #15		@ r7 = no. of registers to transfer.
	mov	r5, r4, lsr #14		@ Get Rn
	and	r5, r5, #15 << 2
	ldr	r0, [r3, r5]		@ Get reg
	eor	r6, r4, r4, lsl #2
	tst	r6, #1 << 23		@ Check inc/dec ^ writeback
	rsbeq	r7, r7, #0
	add	r7, r0, r7, lsl #2	@ Do correction (signed)
	subne	r1, r7, #1
	subeq	r1, r0, #1
	moveq	r0, r7
	tst	r4, #1 << 21		@ Check writeback
	strne	r7, [r3, r5]
	eor	r6, r4, r4, lsl #1
	tst	r6, #1 << 24		@ Check Pre/Post ^ inc/dec
	addeq	r0, r0, #4
	addeq	r1, r1, #4
	teq	r5, #15*4		@ CHECK FOR PC
	biceq	r1, r1, #PCMASK
	biceq	r0, r0, #PCMASK
#ifdef FAULT_CODE_LDMSTM
	orr	r2, r2, #FAULT_CODE_LDMSTM
#endif
	b	do_DataAbort

Ldata_ldcstc_pre:
	mov	r0, r4, lsr #14		@ Get Rn
	and	r0, r0, #15 << 2	@ Mask out reg.
	teq	r0, #15 << 2
	ldr	r0, [r3, r0]		@ Get register
	mov	r1, r4, lsl #24		@ Get offset
	biceq	r0, r0, #PCMASK
	tst	r4, #1 << 23
	addne	r0, r0, r1, lsr #24
	subeq	r0, r0, r1, lsr #24
	mov	r1, r0
#ifdef FAULT_CODE_LDCSTC
	orr	r2, r2, #FAULT_CODE_LDCSTC
#endif
	b	do_DataAbort


/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

	.data
ENTRY(fp_enter)
	.word	fpe_not_present
	.text
/*
 * Register switch for older 26-bit only ARMs
 */
ENTRY(__switch_to)
	add	r0, r0, #TI_CPU_SAVE
	stmia	r0, {r4 - sl, fp, sp, lr}
	add	r1, r1, #TI_CPU_SAVE
	ldmia	r1, {r4 - sl, fp, sp, pc}^
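
@ __switch_to is entered with r0 and r1 pointing at the outgoing and
@ incoming thread_info structures: it saves the callee-saved registers
@ (r4-sl, fp, sp, lr) into the outgoing task's context at TI_CPU_SAVE
@ and loads the incoming task's set, with pc taking the place of lr so
@ the load both resumes the new thread and (via ^) restores its saved
@ psr bits.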

/*
 *=============================================================================
 *		Low-level interface code
 *-----------------------------------------------------------------------------
 *		Trap initialisation
 *-----------------------------------------------------------------------------
 *
 * Note - FIQ code has changed.  The default is a couple of words in 0x1c, 0x20
 * that call _unexp_fiq.  However, we now copy the FIQ routine to 0x1c (removes
 * some excess cycles).
 *
 * What we need to put into 0-0x1c are branches to branch to the kernel.
 */

	.section ".init.text",#alloc,#execinstr

.Ljump_addresses:
	swi	SYS_ERROR0
	.word	vector_undefinstr	- 12
	.word	vector_swi		- 16
	.word	vector_prefetch		- 20
	.word	vector_data		- 24
	.word	vector_addrexcptn	- 28
	.word	vector_IRQ		- 32
	.word	_unexp_fiq		- 36
	b	. + 8
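
@ Each .word above is a handler address minus (vector address + 8),
@ i.e. a ready-made byte offset for a branch placed at that vector
@ (undefined instruction at 0x4, SWI at 0x8, and so on).  __trap_init
@ shifts each one right by two and ORs it into the "b . + 8" template
@ (a branch with a zero offset field) to synthesise "b handler"
@ instructions, then writes the SWI and the seven branches to addresses
@ 0x0-0x1c.
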
/*
 * initialise the trap system
 */
ENTRY(__trap_init)
	stmfd	sp!, {r4 - r7, lr}
	adr	r1, .Ljump_addresses
	ldmia	r1, {r1 - r7, ip, lr}
	orr	r2, lr, r2, lsr #2
	orr	r3, lr, r3, lsr #2
	orr	r4, lr, r4, lsr #2
	orr	r5, lr, r5, lsr #2
	orr	r6, lr, r6, lsr #2
	orr	r7, lr, r7, lsr #2
	orr	ip, lr, ip, lsr #2
	mov	r0, #0
	stmia	r0, {r1 - r7, ip}
	ldmfd	sp!, {r4 - r7, pc}^

	.bss
__temp_irq:	.space	4				@ saved lr_irq
__temp_fiq:	.space	128