#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

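@ Illustrative sketch (an addition, not part of the original file): with
@ args 5 and 6 pushed below the saved registers, a syscall wrapper can
@ recover the pt_regs pointer by skipping over them:
@
@	add	r0, sp, #S_OFF		@ r0 = struct pt_regs *
@
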
/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign
	ldr	\rtemp, [\rtemp]
	mcr	p15, 0, \rtemp, c1, c0
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bits automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that the interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and re-enabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
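/*
 * Sketch of the hardware-saved basic frame, per the ARMv7-M architecture
 * (added here for illustration, lowest address first):
 * r0, r1, r2, r3, r12, lr, return address, xPSR.
 */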
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the CPU was in when the
	@ exception happened, that is either the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the values saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #S_FRAME_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto-saved
	@ xPSR.
	@ The CPU might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm

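@ Sketch of the result (derived from the macro above, for illustration):
@ on exit, sp points at a struct pt_regs on the kernel stack, with r0-r12
@ at S_R0..S_IP, the original sp, lr, return address and xPSR at
@ S_SP/S_LR/S_PC/S_PSR, and old_r0 in the final slot.
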
/*
 * PENDSV and SVCALL are configured to have the same exception
 * priorities. As a kernel thread runs at SVCALL execution priority it
 * can never be preempted and so we will never have to return to a
 * kernel thread here.
 */
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. To tell the hardware if
	@ the sp to be restored is aligned or not, set bit 9 of the saved xPSR
	@ accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11; r0-r3 are reloaded from the exception
	@ frame by the hardware on return
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #S_FRAME_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif	/* CONFIG_CPU_V7M */

@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode.
@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

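@ Usage sketch (added for illustration; mirrors the Thumb-2
@ restore_user_regs further below):
@
@	mov	r2, sp
@	load_user_sp_lr r2, r3, S_SP	@ restore sp_usr and lr_usr
@
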
#ifndef CONFIG_THUMB2_KERNEL
	.macro	svc_exit, rpsr
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6)
	ldr	r0, [sp]
	strex	r1, r2, [sp]			@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#endif
	.endm

	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

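@ The kernel stack is THREAD_SIZE (8K, i.e. 1 << 13) aligned with
@ struct thread_info at its base, so clearing the low 13 bits of sp
@ yields the current thread_info (comment added; assumes the 8K-stack
@ configuration).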
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm

@
@ 32-bit wide "mov pc, reg"
@
	.macro	movw_pc, reg
	mov	pc, \reg
	.endm
#else	/* CONFIG_THUMB2_KERNEL */
	.macro	svc_exit, rpsr
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
	clrex					@ clear the exclusive monitor
	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * Note we don't need to do clrex here as clearing the local monitor is
 * part of each exception entry and exit sequence.
 */
	.macro	restore_user_regs, fast = 0, offset = 0
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
	.endm
#else	/* ifdef CONFIG_CPU_V7M */
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
#endif	/* ifdef CONFIG_CPU_V7M / else */

	.macro	get_thread_info, rd
	mov	\rd, sp
	lsr	\rd, \rd, #13
	mov	\rd, \rd, lsl #13
	.endm

@
@ 32-bit wide "mov pc, reg"
@
	.macro	movw_pc, reg
	mov	pc, \reg
	nop
	.endm
#endif	/* !CONFIG_THUMB2_KERNEL */

/*
 * These are the registers used in the syscall handler, and in theory
 * allow up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for Thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info
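
@ Illustrative use of these aliases in the syscall dispatch path (a
@ sketch added here, not part of the original file):
@
@	ldr	pc, [tbl, scno, lsl #2]	@ call the sys_* routine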