Thumb-2: Implementation of the unified start-up and exceptions code

This patch implements the ARM/Thumb-2 unified kernel start-up and
exception handling code.

In the entry-header.S hunk below, the SVC exception return and the user
register restore sequences are provided as svc_exit and restore_user_regs
macros with separate ARM and Thumb-2 implementations: Thumb-2 has no
ldm/stm form that accesses the user-mode banked registers ("^") and no
LDM form that restores the CPSR together with the PC, so the Thumb-2
variants build an RFE frame (svc_exit) and use the new
store_user_sp_lr/load_user_sp_lr helpers, which reach the user sp/lr by
briefly switching to SYS mode. The get_thread_info macro is moved into
the new #ifndef CONFIG_THUMB2_KERNEL block and given a Thumb-2
counterpart that does not use sp as a shifted operand.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
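
For illustration only (not part of the patch), the difference that
store_user_sp_lr works around, with r0/r1 as placeholder registers and
SVC_MODE/SYSTEM_MODE the existing ARM mode constants:

	@ ARM mode: the banked user sp/lr can be stored directly
	stmia	r0, {sp, lr}^

	@ Thumb-2: no "^" form, so switch to SYS mode (which shares
	@ sp/lr with USR mode), store them, and switch back to SVC
	mrs	r1, cpsr
	eor	r1, r1, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, r1
	str	sp, [r0]
	str	lr, [r0, #4]
	eor	r1, r1, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, r1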
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 87ab4e1..a4eaf4f 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -36,11 +36,6 @@
 #endif
 	.endm
 
-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 	.macro	alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
 	ldr	\rtemp, .LCcralign
@@ -49,6 +44,93 @@
 #endif
 	.endm
 
+	@
+	@ Store/load the USER SP and LR registers by switching to the SYS
+	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+	@ available. Should only be called from SVC mode
+	@
+	.macro	store_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	str	sp, [\rd, #\offset]		@ save sp_usr
+	str	lr, [\rd, #\offset + 4]		@ save lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+	.macro	load_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	ldr	sp, [\rd, #\offset]		@ load sp_usr
+	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+#ifndef CONFIG_THUMB2_KERNEL
+	.macro	svc_exit, rpsr
+	msr	spsr_cxsf, \rpsr
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	.if	\fast
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	.else
+	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp, lsr #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#else	/* CONFIG_THUMB2_KERNEL */
+	.macro	svc_exit, rpsr
+	ldr	r0, [sp, #S_SP]			@ top of the stack
+	ldr	r1, [sp, #S_PC]			@ return address
+	tst	r0, #4				@ orig stack 8-byte aligned?
+	stmdb	r0, {r1, \rpsr}			@ rfe context
+	ldmia	sp, {r0 - r12}
+	ldr	lr, [sp, #S_LR]
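+	@ (The entry code pads the frame by 4 bytes when the original sp
+	@ is not 8-byte aligned, hence the two cases below; both leave sp
+	@ pointing at the rfe context just below the original stack top.)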
+	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
+	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
+	rfeia	sp!
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	mov	r2, sp
+	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]	@ get pc
+	add	sp, sp, #\offset + S_SP
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	.if	\fast
+	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
+	.else
+	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_SP
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp
+	lsr	\rd, \rd, #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#endif	/* !CONFIG_THUMB2_KERNEL */
 
 /*
  * These are the registers used in the syscall handler, and allow us to