Chris Zankel5a0015d2005-06-23 22:01:16 -07001/*
2 * arch/xtensa/kernel/entry.S
3 *
4 * Low-level exception handling
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
Marc Gauthier2d1c6452013-01-05 04:57:17 +040010 * Copyright (C) 2004 - 2008 by Tensilica Inc.
Chris Zankel5a0015d2005-06-23 22:01:16 -070011 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/linkage.h>
Sam Ravnborg0013a852005-09-09 20:57:26 +020017#include <asm/asm-offsets.h>
Chris Zankel5a0015d2005-06-23 22:01:16 -070018#include <asm/processor.h>
Chris Zankel4573e392010-05-02 01:05:13 -070019#include <asm/coprocessor.h>
Chris Zankel5a0015d2005-06-23 22:01:16 -070020#include <asm/thread_info.h>
21#include <asm/uaccess.h>
22#include <asm/unistd.h>
23#include <asm/ptrace.h>
24#include <asm/current.h>
25#include <asm/pgtable.h>
26#include <asm/page.h>
27#include <asm/signal.h>
Chris Zankel173d6682006-12-10 02:18:48 -080028#include <asm/tlbflush.h>
Chris Zankel367b8112008-11-06 06:40:46 -080029#include <variant/tie-asm.h>
Chris Zankel5a0015d2005-06-23 22:01:16 -070030
31/* Unimplemented features. */
32
Chris Zankel5a0015d2005-06-23 22:01:16 -070033#undef KERNEL_STACK_OVERFLOW_CHECK
34#undef PREEMPTIBLE_KERNEL
35#undef ALLOCA_EXCEPTION_IN_IRAM
36
37/* Not well tested.
38 *
39 * - fast_coprocessor
40 */
41
42/*
43 * Macro to find first bit set in WINDOWBASE from the left + 1
44 *
45 * 100....0 -> 1
46 * 010....0 -> 2
47 * 000....1 -> WSBITS
48 */
49
50 .macro ffs_ws bit mask
51
52#if XCHAL_HAVE_NSA
53 nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
54 addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
55#else
56 movi \bit, WSBITS
57#if WSBITS > 16
58 _bltui \mask, 0x10000, 99f
59 addi \bit, \bit, -16
60 extui \mask, \mask, 16, 16
61#endif
62#if WSBITS > 8
6399: _bltui \mask, 0x100, 99f
64 addi \bit, \bit, -8
65 srli \mask, \mask, 8
66#endif
6799: _bltui \mask, 0x10, 99f
68 addi \bit, \bit, -4
69 srli \mask, \mask, 4
7099: _bltui \mask, 0x4, 99f
71 addi \bit, \bit, -2
72 srli \mask, \mask, 2
7399: _bltui \mask, 0x2, 99f
74 addi \bit, \bit, -1
7599:
76
77#endif
78 .endm
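
/* For illustration only: in C terms the macro above computes roughly the
 * following (hypothetical helper; assumes a non-zero mask and WSBITS <= 32):
 *
 *	static inline unsigned ffs_ws_c(unsigned wsbits, unsigned mask)
 *	{
 *		unsigned bit = 1;		// leftmost bit set -> 1
 *
 *		while (!(mask & (1u << (wsbits - bit))))
 *			bit++;			// scan towards bit 0
 *		return bit;			// only bit 0 set -> wsbits
 *	}
 */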
79
80/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
81
82/*
83 * First-level exception handler for user exceptions.
84 * Save some special registers, extra states and all registers in the AR
85 * register file that were in use in the user task, and jump to the common
86 * exception code.
87 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
88 * save them for kernel exceptions).
89 *
90 * Entry condition for user_exception:
91 *
92 * a0: trashed, original value saved on stack (PT_AREG0)
93 * a1: a1
94 * a2: new stack pointer, original value in depc
95 * a3: dispatch table
96 * depc: a2, original value saved on stack (PT_DEPC)
97 * excsave1: a3
98 *
99 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
100 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
101 *
102 * Entry condition for _user_exception:
103 *
104 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
105 * excsave has been restored, and
106 * stack pointer (a1) has been set.
107 *
Daniel Mack3ad2f3f2010-02-03 08:01:28 +0800108 * Note: _user_exception might be at an odd address. Don't use call0..call12
Chris Zankel5a0015d2005-06-23 22:01:16 -0700109 */
110
111ENTRY(user_exception)
112
113 /* Save a2, a3, and depc, restore excsave_1 and set SP. */
114
Max Filippovbc5378f2012-10-15 03:55:38 +0400115 xsr a3, excsave1
116 rsr a0, depc
Chris Zankel5a0015d2005-06-23 22:01:16 -0700117 s32i a1, a2, PT_AREG1
118 s32i a0, a2, PT_AREG2
119 s32i a3, a2, PT_AREG3
120 mov a1, a2
121
122 .globl _user_exception
123_user_exception:
124
125 /* Save SAR and turn off single stepping */
126
127 movi a2, 0
Max Filippovbc5378f2012-10-15 03:55:38 +0400128 rsr a3, sar
129 xsr a2, icountlevel
Chris Zankel5a0015d2005-06-23 22:01:16 -0700130 s32i a3, a1, PT_SAR
Chris Zankel29c4dfd2007-05-31 17:49:32 -0700131 s32i a2, a1, PT_ICOUNTLEVEL
Chris Zankel5a0015d2005-06-23 22:01:16 -0700132
133 /* Rotate ws so that the current windowbase is at bit0. */
134 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
135
Max Filippovbc5378f2012-10-15 03:55:38 +0400136 rsr a2, windowbase
137 rsr a3, windowstart
Chris Zankel5a0015d2005-06-23 22:01:16 -0700138 ssr a2
139 s32i a2, a1, PT_WINDOWBASE
140 s32i a3, a1, PT_WINDOWSTART
141 slli a2, a3, 32-WSBITS
142 src a2, a3, a2
143 srli a2, a2, 32-WSBITS
144 s32i a2, a1, PT_WMASK # needed for restoring registers
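
	/* In C terms, the rotation above is roughly (illustrative sketch;
	 * ws = WINDOWSTART, wb = WINDOWBASE, and WSBITS <= 16 is assumed):
	 *
	 *	wmask = ((ws >> wb) | (ws << (WSBITS - wb)))
	 *		& ((1 << WSBITS) - 1);		// rotate right by wb
	 */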
145
146 /* Save only live registers. */
147
148 _bbsi.l a2, 1, 1f
149 s32i a4, a1, PT_AREG4
150 s32i a5, a1, PT_AREG5
151 s32i a6, a1, PT_AREG6
152 s32i a7, a1, PT_AREG7
153 _bbsi.l a2, 2, 1f
154 s32i a8, a1, PT_AREG8
155 s32i a9, a1, PT_AREG9
156 s32i a10, a1, PT_AREG10
157 s32i a11, a1, PT_AREG11
158 _bbsi.l a2, 3, 1f
159 s32i a12, a1, PT_AREG12
160 s32i a13, a1, PT_AREG13
161 s32i a14, a1, PT_AREG14
162 s32i a15, a1, PT_AREG15
163 _bnei a2, 1, 1f # only one valid frame?
164
165 /* Only one valid frame, skip saving regs. */
166
167 j 2f
168
169 /* Save the remaining registers.
170 * We have to save all registers up to the first '1' from
171 * the right, except the current frame (bit 0).
172 * Assume a2 is: 001001000110001
Chris Zankel66569202007-08-22 10:14:51 -0700173 * All register frames starting from the top field to the marked '1'
Chris Zankel5a0015d2005-06-23 22:01:16 -0700174 * must be saved.
175 */
176
1771: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
178 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
179 and a3, a3, a2 # max. only one bit is set
180
181 /* Find number of frames to save */
182
183 ffs_ws a0, a3 # number of frames to the '1' from left
184
185 /* Store information into WMASK:
186 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
187 * bits 4...: number of valid 4-register frames
188 */
189
190 slli a3, a0, 4 # number of frames to save in bits 8..4
191 extui a2, a2, 0, 4 # mask for the first 16 registers
192 or a2, a3, a2
193 s32i a2, a1, PT_WMASK # needed when we restore the reg-file
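
	/* Illustratively, the value stored above is
	 *
	 *	wmask = (frames_to_save << 4) | (ws_rotated & 0xf);
	 *
	 * where frames_to_save is the ffs_ws result in a0 and ws_rotated is
	 * the rotated WINDOWSTART (descriptive names, not real symbols).
	 */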
194
195 /* Save 4 registers at a time */
196
1971: rotw -1
198 s32i a0, a5, PT_AREG_END - 16
199 s32i a1, a5, PT_AREG_END - 12
200 s32i a2, a5, PT_AREG_END - 8
201 s32i a3, a5, PT_AREG_END - 4
202 addi a0, a4, -1
203 addi a1, a5, -16
204 _bnez a0, 1b
205
206 /* WINDOWBASE still in SAR! */
207
Max Filippovbc5378f2012-10-15 03:55:38 +0400208 rsr a2, sar # original WINDOWBASE
Chris Zankel5a0015d2005-06-23 22:01:16 -0700209 movi a3, 1
210 ssl a2
211 sll a3, a3
Max Filippovbc5378f2012-10-15 03:55:38 +0400212 wsr a3, windowstart # set corresponding WINDOWSTART bit
213 wsr a2, windowbase # and WINDOWBASE
Chris Zankel5a0015d2005-06-23 22:01:16 -0700214 rsync
215
216 /* We are back to the original stack pointer (a1) */
217
Chris Zankelc658eac2008-02-12 13:17:07 -08002182: /* Now, jump to the common exception handler. */
Chris Zankel5a0015d2005-06-23 22:01:16 -0700219
220 j common_exception
221
Chris Zankeld1538c42012-11-16 16:16:20 -0800222ENDPROC(user_exception)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700223
224/*
225 * First-level exception handler for kernel exceptions
226 * Save special registers and the live window frame.
227 * Note: Even though we change the stack pointer, we don't have to do a
228 * MOVSP here, as we do that when we return from the exception.
229 * (See comment in the kernel exception exit code)
230 *
231 * Entry condition for kernel_exception:
232 *
233 * a0: trashed, original value saved on stack (PT_AREG0)
234 * a1: a1
235 * a2: new stack pointer, original in DEPC
236 * a3: dispatch table
237 * depc: a2, original value saved on stack (PT_DEPC)
238 * excsave_1: a3
239 *
240 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
241 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
242 *
243 * Entry condition for _kernel_exception:
244 *
245 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
246 * excsave has been restored, and
247 * stack pointer (a1) has been set.
248 *
Daniel Mack3ad2f3f2010-02-03 08:01:28 +0800249 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
Chris Zankel5a0015d2005-06-23 22:01:16 -0700250 */
251
252ENTRY(kernel_exception)
253
254 /* Save a0, a2, a3, DEPC and set SP. */
255
Max Filippovbc5378f2012-10-15 03:55:38 +0400256 xsr a3, excsave1 # restore a3, excsave_1
257 rsr a0, depc # get a2
Chris Zankel5a0015d2005-06-23 22:01:16 -0700258 s32i a1, a2, PT_AREG1
259 s32i a0, a2, PT_AREG2
260 s32i a3, a2, PT_AREG3
261 mov a1, a2
262
263 .globl _kernel_exception
264_kernel_exception:
265
266 /* Save SAR and turn off single stepping */
267
268 movi a2, 0
Max Filippovbc5378f2012-10-15 03:55:38 +0400269 rsr a3, sar
270 xsr a2, icountlevel
Chris Zankel5a0015d2005-06-23 22:01:16 -0700271 s32i a3, a1, PT_SAR
Chris Zankel29c4dfd2007-05-31 17:49:32 -0700272 s32i a2, a1, PT_ICOUNTLEVEL
Chris Zankel5a0015d2005-06-23 22:01:16 -0700273
274 /* Rotate ws so that the current windowbase is at bit0. */
275 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
276
Max Filippovbc5378f2012-10-15 03:55:38 +0400277 rsr a2, windowbase # don't need to save these, we only
278 rsr a3, windowstart # need shifted windowstart: windowmask
Chris Zankel5a0015d2005-06-23 22:01:16 -0700279 ssr a2
280 slli a2, a3, 32-WSBITS
281 src a2, a3, a2
282 srli a2, a2, 32-WSBITS
283 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
284
285 /* Save only the live window-frame */
286
287 _bbsi.l a2, 1, 1f
288 s32i a4, a1, PT_AREG4
289 s32i a5, a1, PT_AREG5
290 s32i a6, a1, PT_AREG6
291 s32i a7, a1, PT_AREG7
292 _bbsi.l a2, 2, 1f
293 s32i a8, a1, PT_AREG8
294 s32i a9, a1, PT_AREG9
295 s32i a10, a1, PT_AREG10
296 s32i a11, a1, PT_AREG11
297 _bbsi.l a2, 3, 1f
298 s32i a12, a1, PT_AREG12
299 s32i a13, a1, PT_AREG13
300 s32i a14, a1, PT_AREG14
301 s32i a15, a1, PT_AREG15
302
3031:
304
305#ifdef KERNEL_STACK_OVERFLOW_CHECK
306
307 /* Stack overflow check, for debugging */
308 extui a2, a1, TASK_SIZE_BITS,XX
309 movi a3, SIZE??
310 _bge a2, a3, out_of_stack_panic
311
312#endif
313
314/*
315 * This is the common exception handler.
316 * We get here from the user exception handler or simply by falling through
317 * from the kernel exception handler.
318 * Save the remaining special registers, switch to kernel mode, and jump
319 * to the second-level exception handler.
320 *
321 */
322
323common_exception:
324
Chris Zankel29c4dfd2007-05-31 17:49:32 -0700325 /* Save some registers, disable loops and clear the syscall flag. */
Chris Zankel5a0015d2005-06-23 22:01:16 -0700326
Max Filippovbc5378f2012-10-15 03:55:38 +0400327 rsr a2, debugcause
328 rsr a3, epc1
Chris Zankel5a0015d2005-06-23 22:01:16 -0700329 s32i a2, a1, PT_DEBUGCAUSE
330 s32i a3, a1, PT_PC
331
Chris Zankel29c4dfd2007-05-31 17:49:32 -0700332 movi a2, -1
Max Filippovbc5378f2012-10-15 03:55:38 +0400333 rsr a3, excvaddr
Chris Zankel29c4dfd2007-05-31 17:49:32 -0700334 s32i a2, a1, PT_SYSCALL
Chris Zankel5a0015d2005-06-23 22:01:16 -0700335 movi a2, 0
336 s32i a3, a1, PT_EXCVADDR
Max Filippovbc5378f2012-10-15 03:55:38 +0400337 xsr a2, lcount
Chris Zankel5a0015d2005-06-23 22:01:16 -0700338 s32i a2, a1, PT_LCOUNT
339
340 /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
341
Max Filippovbc5378f2012-10-15 03:55:38 +0400342 rsr a0, exccause
Chris Zankel5a0015d2005-06-23 22:01:16 -0700343 movi a3, 0
Max Filippovbc5378f2012-10-15 03:55:38 +0400344 rsr a2, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -0700345 s32i a0, a1, PT_EXCCAUSE
346 s32i a3, a2, EXC_TABLE_FIXUP
347
348 /* All unrecoverable states are saved on stack, now, and a1 is valid,
349 * so we can allow exceptions and interrupts (*) again.
350 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
351 *
Marc Gauthier2d1c6452013-01-05 04:57:17 +0400352 * (*) We only allow interrupts of higher priority than current IRQ
Chris Zankel5a0015d2005-06-23 22:01:16 -0700353 */
354
Max Filippovbc5378f2012-10-15 03:55:38 +0400355 rsr a3, ps
Chris Zankel5a0015d2005-06-23 22:01:16 -0700356 addi a0, a0, -4
357 movi a2, 1
Marc Gauthier2d1c6452013-01-05 04:57:17 +0400358 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
359 # a3 = PS.INTLEVEL
360 movnez a2, a3, a3 # a2 = 1: level-1, > 1: high priority
361 moveqz a3, a2, a0 # a3 = IRQ level iff interrupt
Chris Zankel173d6682006-12-10 02:18:48 -0800362 movi a2, 1 << PS_WOE_BIT
Chris Zankel5a0015d2005-06-23 22:01:16 -0700363 or a3, a3, a2
Max Filippovbc5378f2012-10-15 03:55:38 +0400364 rsr a0, exccause
365 xsr a3, ps
Chris Zankel5a0015d2005-06-23 22:01:16 -0700366
367 s32i a3, a1, PT_PS # save ps
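
	/* Illustrative C sketch of the PS value built above (macro names are
	 * approximate; EXCCAUSE_LEVEL1_INTERRUPT is 4, hence the 'addi -4'):
	 *
	 *	intlevel = old_ps & PS_INTLEVEL_MASK;
	 *	if (exccause == EXCCAUSE_LEVEL1_INTERRUPT && intlevel == 0)
	 *		intlevel = 1;
	 *	new_ps = intlevel | (1 << PS_WOE_BIT);	// EXCM/UM/RING/OWB = 0
	 */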
368
Max Filippovbc5378f2012-10-15 03:55:38 +0400369 /* Save lbeg, lend */
Chris Zankel5a0015d2005-06-23 22:01:16 -0700370
Max Filippovbc5378f2012-10-15 03:55:38 +0400371 rsr a2, lbeg
372 rsr a3, lend
Chris Zankel5a0015d2005-06-23 22:01:16 -0700373 s32i a2, a1, PT_LBEG
374 s32i a3, a1, PT_LEND
375
Max Filippov733536b2012-11-15 06:25:48 +0400376 /* Save SCOMPARE1 */
377
378#if XCHAL_HAVE_S32C1I
379 rsr a2, scompare1
380 s32i a2, a1, PT_SCOMPARE1
381#endif
382
Chris Zankelc658eac2008-02-12 13:17:07 -0800383 /* Save optional registers. */
384
385 save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
386
Chris Zankel5a0015d2005-06-23 22:01:16 -0700387 /* Go to second-level dispatcher. Set up parameters to pass to the
388 * exception handler and call the exception handler.
389 */
390
391 movi a4, exc_table
392 mov a6, a1 # pass stack frame
393 mov a7, a0 # pass EXCCAUSE
394 addx4 a4, a0, a4
395 l32i a4, a4, EXC_TABLE_DEFAULT # load handler
396
397 /* Call the second-level handler */
398
399 callx4 a4
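
	/* In rough C terms (illustrative; the handlers' exact signatures vary
	 * with the exception cause):
	 *
	 *	typedef void (*exc_handler_t)(struct pt_regs *regs,
	 *				      unsigned long exccause);
	 *	exc_handler_t handler = exc_table[exccause].default_handler;
	 *	handler(regs, exccause);	// call4 ABI: a6 = regs, a7 = cause
	 */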
400
401 /* Jump here for exception exit */
402
403common_exception_return:
404
405 /* Jump if we are returning from kernel exceptions. */
406
4071: l32i a3, a1, PT_PS
Chris Zankele1088432008-01-22 00:45:25 -0800408 _bbci.l a3, PS_UM_BIT, 4f
Chris Zankel5a0015d2005-06-23 22:01:16 -0700409
410 /* Specific to a user exception exit:
411 * We need to check some flags for signal handling and rescheduling,
412 * and have to restore WB and WS, extra states, and all registers
413 * in the register file that were in use in the user task.
Chris Zankele1088432008-01-22 00:45:25 -0800414 * Note that we don't disable interrupts here.
Chris Zankel5a0015d2005-06-23 22:01:16 -0700415 */
416
417 GET_THREAD_INFO(a2,a1)
418 l32i a4, a2, TI_FLAGS
419
Chris Zankel5a0015d2005-06-23 22:01:16 -0700420 _bbsi.l a4, TIF_NEED_RESCHED, 3f
Al Viroa53bb242012-04-24 02:30:16 -0400421 _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
Chris Zankel5a0015d2005-06-23 22:01:16 -0700422 _bbci.l a4, TIF_SIGPENDING, 4f
423
Al Viroa53bb242012-04-24 02:30:16 -04004242: l32i a4, a1, PT_DEPC
Chris Zankel5a0015d2005-06-23 22:01:16 -0700425 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
Chris Zankel5a0015d2005-06-23 22:01:16 -0700426
Chris Zankele1088432008-01-22 00:45:25 -0800427 /* Call do_signal() */
428
Al Viroa53bb242012-04-24 02:30:16 -0400429 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700430 mov a6, a1
Chris Zankel5a0015d2005-06-23 22:01:16 -0700431 callx4 a4
432 j 1b
433
Chris Zankele1088432008-01-22 00:45:25 -08004343: /* Reschedule */
Chris Zankel5a0015d2005-06-23 22:01:16 -0700435
Chris Zankel5a0015d2005-06-23 22:01:16 -0700436 movi a4, schedule # void schedule (void)
437 callx4 a4
438 j 1b
439
Chris Zankele1088432008-01-22 00:45:25 -08004404: /* Restore optional registers. */
Chris Zankel5a0015d2005-06-23 22:01:16 -0700441
Chris Zankele1088432008-01-22 00:45:25 -0800442 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
443
Max Filippov733536b2012-11-15 06:25:48 +0400444 /* Restore SCOMPARE1 */
445
446#if XCHAL_HAVE_S32C1I
447 l32i a2, a1, PT_SCOMPARE1
448 wsr a2, scompare1
449#endif
Max Filippovbc5378f2012-10-15 03:55:38 +0400450 wsr a3, ps /* disable interrupts */
Chris Zankele1088432008-01-22 00:45:25 -0800451
452 _bbci.l a3, PS_UM_BIT, kernel_exception_exit
453
454user_exception_exit:
455
456 /* Restore the state of the task and return from the exception. */
Chris Zankel5a0015d2005-06-23 22:01:16 -0700457
Chris Zankel5a0015d2005-06-23 22:01:16 -0700458 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
459
460 l32i a2, a1, PT_WINDOWBASE
461 l32i a3, a1, PT_WINDOWSTART
Max Filippovbc5378f2012-10-15 03:55:38 +0400462 wsr a1, depc # use DEPC as temp storage
463 wsr a3, windowstart # restore WINDOWSTART
Chris Zankel5a0015d2005-06-23 22:01:16 -0700464 ssr a2 # preserve user's WB in the SAR
Max Filippovbc5378f2012-10-15 03:55:38 +0400465 wsr a2, windowbase # switch to user's saved WB
Chris Zankel5a0015d2005-06-23 22:01:16 -0700466 rsync
Max Filippovbc5378f2012-10-15 03:55:38 +0400467 rsr a1, depc # restore stack pointer
Chris Zankel5a0015d2005-06-23 22:01:16 -0700468 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
469 rotw -1 # we restore a4..a7
470 _bltui a6, 16, 1f # only have to restore current window?
471
472 /* The working registers are a0 and a3. We are restoring to
473 * a4..a7. Be careful not to destroy what we have just restored.
474 * Note: wmask has the format YYYYM:
475 * Y: number of registers saved in groups of 4
476 * M: 4 bit mask of first 16 registers
477 */
478
479 mov a2, a6
480 mov a3, a5
481
4822: rotw -1 # a0..a3 become a4..a7
483 addi a3, a7, -4*4 # next iteration
484 addi a2, a6, -16 # decrementing Y in WMASK
485 l32i a4, a3, PT_AREG_END + 0
486 l32i a5, a3, PT_AREG_END + 4
487 l32i a6, a3, PT_AREG_END + 8
488 l32i a7, a3, PT_AREG_END + 12
489 _bgeui a2, 16, 2b
490
491 /* Clear unrestored registers (don't leak anything to user-land) */
492
Max Filippovbc5378f2012-10-15 03:55:38 +04004931: rsr a0, windowbase
494 rsr a3, sar
Chris Zankel5a0015d2005-06-23 22:01:16 -0700495 sub a3, a0, a3
496 beqz a3, 2f
497 extui a3, a3, 0, WBBITS
498
4991: rotw -1
500 addi a3, a7, -1
501 movi a4, 0
502 movi a5, 0
503 movi a6, 0
504 movi a7, 0
505 bgei a3, 1, 1b
506
507 /* We are back where we were when we started.
508 * Note: a2 still contains WMASK (if we've returned to the original
509 * frame where we had loaded a2), or at least the lower 4 bits
510 * (if we have restored WSBITS-1 frames).
511 */
512
5132: j common_exception_exit
514
515 /* This is the kernel exception exit.
516 * We avoided doing a MOVSP when we entered the exception, but we
517 * have to do it here.
518 */
519
520kernel_exception_exit:
521
Chris Zankel5a0015d2005-06-23 22:01:16 -0700522#ifdef PREEMPTIBLE_KERNEL
523
524#ifdef CONFIG_PREEMPT
525
526 /*
527 * Note: We've just returned from a call4, so we have
528 * at least 4 addt'l regs.
529 */
530
531 /* Check current_thread_info->preempt_count */
532
533 GET_THREAD_INFO(a2)
534 l32i a3, a2, TI_PREEMPT
535 bnez a3, 1f
536
537 l32i a2, a2, TI_FLAGS
538
5391:
540
541#endif
542
543#endif
544
545 /* Check if we have to do a movsp.
546 *
547 * We only have to do a movsp if the previous window-frame has
548 * been spilled to the *temporary* exception stack instead of the
549 * task's stack. This is the case if the corresponding bit in
550 * WINDOWSTART for the previous window-frame was set before
551 * (not spilled) but is zero now (spilled).
552 * If this bit is zero, all other bits except the one for the
553 * current window frame are also zero. So, we can use a simple test:
554 * 'and' WINDOWSTART and WINDOWSTART-1:
555 *
556 * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
557 *
558 * The result is zero only if one bit was set.
559 *
560 * (Note: We might have gone through several task switches before
561 * we come back to the current task, so WINDOWBASE might be
562 * different from the time the exception occurred.)
563 */
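
	/* Roughly, in C (illustrative sketch; a1 points at the pt_regs frame):
	 *
	 *	if (wmask != 1 && is_power_of_2(windowstart)) {
	 *		// the caller's a0..a3 were spilled below the exception
	 *		// stack; move that 16-byte area below the original sp
	 *		memcpy((char *)a1 + PT_SIZE, (char *)a1 - 16, 16);
	 *	}
	 */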
564
565 /* Test WINDOWSTART before and after the exception.
566 * We actually have WMASK, so we only have to test if it is 1 or not.
567 */
568
569 l32i a2, a1, PT_WMASK
570 _beqi a2, 1, common_exception_exit # Spilled before exception, jump
571
572 /* Test WINDOWSTART now. If spilled, do the movsp */
573
Max Filippovbc5378f2012-10-15 03:55:38 +0400574 rsr a3, windowstart
Chris Zankel5a0015d2005-06-23 22:01:16 -0700575 addi a0, a3, -1
576 and a3, a3, a0
577 _bnez a3, common_exception_exit
578
579 /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
580
581 addi a0, a1, -16
582 l32i a3, a0, 0
583 l32i a4, a0, 4
584 s32i a3, a1, PT_SIZE+0
585 s32i a4, a1, PT_SIZE+4
586 l32i a3, a0, 8
587 l32i a4, a0, 12
588 s32i a3, a1, PT_SIZE+8
589 s32i a4, a1, PT_SIZE+12
590
591 /* Common exception exit.
592 * We restore the special register and the current window frame, and
593 * return from the exception.
594 *
595 * Note: We expect a2 to hold PT_WMASK
596 */
597
598common_exception_exit:
599
Chris Zankelc658eac2008-02-12 13:17:07 -0800600 /* Restore address registers. */
601
Chris Zankel5a0015d2005-06-23 22:01:16 -0700602 _bbsi.l a2, 1, 1f
603 l32i a4, a1, PT_AREG4
604 l32i a5, a1, PT_AREG5
605 l32i a6, a1, PT_AREG6
606 l32i a7, a1, PT_AREG7
607 _bbsi.l a2, 2, 1f
608 l32i a8, a1, PT_AREG8
609 l32i a9, a1, PT_AREG9
610 l32i a10, a1, PT_AREG10
611 l32i a11, a1, PT_AREG11
612 _bbsi.l a2, 3, 1f
613 l32i a12, a1, PT_AREG12
614 l32i a13, a1, PT_AREG13
615 l32i a14, a1, PT_AREG14
616 l32i a15, a1, PT_AREG15
617
618 /* Restore PC, SAR */
619
6201: l32i a2, a1, PT_PC
621 l32i a3, a1, PT_SAR
Max Filippovbc5378f2012-10-15 03:55:38 +0400622 wsr a2, epc1
623 wsr a3, sar
Chris Zankel5a0015d2005-06-23 22:01:16 -0700624
625 /* Restore LBEG, LEND, LCOUNT */
626
627 l32i a2, a1, PT_LBEG
628 l32i a3, a1, PT_LEND
Max Filippovbc5378f2012-10-15 03:55:38 +0400629 wsr a2, lbeg
Chris Zankel5a0015d2005-06-23 22:01:16 -0700630 l32i a2, a1, PT_LCOUNT
Max Filippovbc5378f2012-10-15 03:55:38 +0400631 wsr a3, lend
632 wsr a2, lcount
Chris Zankel5a0015d2005-06-23 22:01:16 -0700633
Chris Zankel29c4dfd2007-05-31 17:49:32 -0700634 /* We control single stepping through the ICOUNTLEVEL register. */
635
636 l32i a2, a1, PT_ICOUNTLEVEL
637 movi a3, -2
Max Filippovbc5378f2012-10-15 03:55:38 +0400638 wsr a2, icountlevel
639 wsr a3, icount
Chris Zankel29c4dfd2007-05-31 17:49:32 -0700640
Chris Zankel5a0015d2005-06-23 22:01:16 -0700641 /* Check if it was double exception. */
642
643 l32i a0, a1, PT_DEPC
644 l32i a3, a1, PT_AREG3
Marc Gauthier2d1c6452013-01-05 04:57:17 +0400645 _bltui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
Chris Zankel5a0015d2005-06-23 22:01:16 -0700646
Marc Gauthier2d1c6452013-01-05 04:57:17 +0400647 wsr a0, depc
648 l32i a2, a1, PT_AREG2
649 l32i a0, a1, PT_AREG0
650 l32i a1, a1, PT_AREG1
651 rfde
652
6531:
Chris Zankel5a0015d2005-06-23 22:01:16 -0700654 /* Restore a0...a3 and return */
655
Marc Gauthier2d1c6452013-01-05 04:57:17 +0400656 rsr a0, ps
657 extui a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
658 movi a0, 2f
659 slli a2, a2, 4
660 add a0, a2, a0
661 l32i a2, a1, PT_AREG2
662 jx a0
663
664 .macro irq_exit_level level
665 .align 16
666 .if XCHAL_EXCM_LEVEL >= \level
667 l32i a0, a1, PT_PC
668 wsr a0, epc\level
669 l32i a0, a1, PT_AREG0
670 l32i a1, a1, PT_AREG1
671 rfi \level
672 .endif
673 .endm
674
675 .align 16
6762:
Chris Zankel5a0015d2005-06-23 22:01:16 -0700677 l32i a0, a1, PT_AREG0
678 l32i a1, a1, PT_AREG1
679 rfe
680
Marc Gauthier2d1c6452013-01-05 04:57:17 +0400681 .align 16
682 /* no rfi for level-1 irq, handled by rfe above*/
683 nop
684
685 irq_exit_level 2
686 irq_exit_level 3
687 irq_exit_level 4
688 irq_exit_level 5
689 irq_exit_level 6
Chris Zankel5a0015d2005-06-23 22:01:16 -0700690
Chris Zankeld1538c42012-11-16 16:16:20 -0800691ENDPROC(kernel_exception)
692
Chris Zankel5a0015d2005-06-23 22:01:16 -0700693/*
694 * Debug exception handler.
695 *
696 * Currently, we don't support KGDB, so only user application can be debugged.
697 *
698 * When we get here, a0 is trashed and saved to excsave[debuglevel]
699 */
700
701ENTRY(debug_exception)
702
Max Filippovbc5378f2012-10-15 03:55:38 +0400703 rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
Chris Zankel173d6682006-12-10 02:18:48 -0800704 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
Chris Zankel5a0015d2005-06-23 22:01:16 -0700705
Max Filippovbc5378f2012-10-15 03:55:38 +0400706 /* Set EPC1 and EXCCAUSE */
Chris Zankel5a0015d2005-06-23 22:01:16 -0700707
Max Filippovbc5378f2012-10-15 03:55:38 +0400708 wsr a2, depc # save a2 temporarily
709 rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
710 wsr a2, epc1
Chris Zankel5a0015d2005-06-23 22:01:16 -0700711
712 movi a2, EXCCAUSE_MAPPED_DEBUG
Max Filippovbc5378f2012-10-15 03:55:38 +0400713 wsr a2, exccause
Chris Zankel5a0015d2005-06-23 22:01:16 -0700714
715 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
716
Chris Zankel173d6682006-12-10 02:18:48 -0800717 movi a2, 1 << PS_EXCM_BIT
Chris Zankel5a0015d2005-06-23 22:01:16 -0700718 or a2, a0, a2
719 movi a0, debug_exception # restore a3, debug jump vector
Max Filippovbc5378f2012-10-15 03:55:38 +0400720 wsr a2, ps
721 xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
Chris Zankel5a0015d2005-06-23 22:01:16 -0700722
723 /* Switch to kernel/user stack, restore jump vector, and save a0 */
724
Chris Zankel173d6682006-12-10 02:18:48 -0800725 bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
Chris Zankel5a0015d2005-06-23 22:01:16 -0700726
727 addi a2, a1, -16-PT_SIZE # assume kernel stack
728 s32i a0, a2, PT_AREG0
729 movi a0, 0
730 s32i a1, a2, PT_AREG1
731 s32i a0, a2, PT_DEPC # mark it as a regular exception
Max Filippovbc5378f2012-10-15 03:55:38 +0400732 xsr a0, depc
Chris Zankel5a0015d2005-06-23 22:01:16 -0700733 s32i a3, a2, PT_AREG3
734 s32i a0, a2, PT_AREG2
735 mov a1, a2
736 j _kernel_exception
737
Max Filippovbc5378f2012-10-15 03:55:38 +04007382: rsr a2, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -0700739 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
740 s32i a0, a2, PT_AREG0
741 movi a0, 0
742 s32i a1, a2, PT_AREG1
743 s32i a0, a2, PT_DEPC
Max Filippovbc5378f2012-10-15 03:55:38 +0400744 xsr a0, depc
Chris Zankel5a0015d2005-06-23 22:01:16 -0700745 s32i a3, a2, PT_AREG3
746 s32i a0, a2, PT_AREG2
747 mov a1, a2
748 j _user_exception
749
750 /* Debug exception while in exception mode. */
7511: j 1b // FIXME!!
752
Chris Zankeld1538c42012-11-16 16:16:20 -0800753ENDPROC(debug_exception)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700754
755/*
756 * We get here in case of an unrecoverable exception.
757 * The only thing we can do is to be nice and print a panic message.
758 * We only produce a single stack frame for panic, so ???
759 *
760 *
761 * Entry conditions:
762 *
763 * - a0 contains the caller address; original value saved in excsave1.
764 * - the original a0 contains a valid return address (backtrace) or 0.
765 * - a2 contains a valid stackpointer
766 *
767 * Notes:
768 *
769 * - If the stack pointer could be invalid, the caller has to setup a
770 * dummy stack pointer (e.g. the stack of the init_task)
771 *
772 * - If the return address could be invalid, the caller has to set it
773 * to 0, so the backtrace would stop.
774 *
775 */
776 .align 4
777unrecoverable_text:
778 .ascii "Unrecoverable error in exception handler\0"
779
780ENTRY(unrecoverable_exception)
781
782 movi a0, 1
783 movi a1, 0
784
Max Filippovbc5378f2012-10-15 03:55:38 +0400785 wsr a0, windowstart
786 wsr a1, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -0700787 rsync
788
Marc Gauthier2d1c6452013-01-05 04:57:17 +0400789 movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
Max Filippovbc5378f2012-10-15 03:55:38 +0400790 wsr a1, ps
Chris Zankel5a0015d2005-06-23 22:01:16 -0700791 rsync
792
793 movi a1, init_task
794 movi a0, 0
795 addi a1, a1, PT_REGS_OFFSET
796
797 movi a4, panic
798 movi a6, unrecoverable_text
799
800 callx4 a4
801
8021: j 1b
803
Chris Zankeld1538c42012-11-16 16:16:20 -0800804ENDPROC(unrecoverable_exception)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700805
806/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
807
808/*
809 * Fast-handler for alloca exceptions
810 *
811 * The ALLOCA handler is entered when user code executes the MOVSP
812 * instruction and the caller's frame is not in the register file.
813 * In this case, the caller frame's a0..a3 are on the stack just
814 * below sp (a1), and this handler moves them.
815 *
816 * For "MOVSP <ar>,<as>" without destination register a1, this routine
817 * simply moves the value from <as> to <ar> without moving the save area.
818 *
819 * Entry condition:
820 *
821 * a0: trashed, original value saved on stack (PT_AREG0)
822 * a1: a1
823 * a2: new stack pointer, original in DEPC
824 * a3: dispatch table
825 * depc: a2, original value saved on stack (PT_DEPC)
826 * excsave_1: a3
827 *
828 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
829 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
830 */
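
/* For "movsp a1, as" with a spilled caller frame, the code below effectively
 * does, in rough C terms (illustrative; new_sp is the value of <as>):
 *
 *	memcpy((char *)new_sp - 16, (char *)old_sp - 16, 16);	// a0..a3 area
 *	sp = new_sp;
 *
 * using L32E/S32E so that the copy is performed with the user's PS.RING
 * privileges rather than the kernel's.
 */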
831
832#if XCHAL_HAVE_BE
833#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4
834#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4
835#else
836#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4
837#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4
838#endif
839
840ENTRY(fast_alloca)
841
842 /* We shouldn't be in a double exception. */
843
844 l32i a0, a2, PT_DEPC
845 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
846
Max Filippovbc5378f2012-10-15 03:55:38 +0400847 rsr a0, depc # get a2
Chris Zankel5a0015d2005-06-23 22:01:16 -0700848 s32i a4, a2, PT_AREG4 # save a4 and
849 s32i a0, a2, PT_AREG2 # a2 to stack
850
851 /* Exit critical section. */
852
853 movi a0, 0
854 s32i a0, a3, EXC_TABLE_FIXUP
855
856 /* Restore a3, excsave_1 */
857
Max Filippovbc5378f2012-10-15 03:55:38 +0400858 xsr a3, excsave1 # make sure excsave_1 is valid for dbl.
859 rsr a4, epc1 # get exception address
Chris Zankel5a0015d2005-06-23 22:01:16 -0700860 s32i a3, a2, PT_AREG3 # save a3 to stack
861
862#ifdef ALLOCA_EXCEPTION_IN_IRAM
863#error iram not supported
864#else
865 /* Note: l8ui not allowed in IRAM/IROM!! */
866 l8ui a0, a4, 1 # read as(src) from MOVSP instruction
867#endif
868 movi a3, .Lmovsp_src
869 _EXTUI_MOVSP_SRC(a0) # extract source register number
870 addx8 a3, a0, a3
871 jx a3
872
873.Lunhandled_double:
Max Filippovbc5378f2012-10-15 03:55:38 +0400874 wsr a0, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -0700875 movi a0, unrecoverable_exception
876 callx0 a0
877
878 .align 8
879.Lmovsp_src:
880 l32i a3, a2, PT_AREG0; _j 1f; .align 8
881 mov a3, a1; _j 1f; .align 8
882 l32i a3, a2, PT_AREG2; _j 1f; .align 8
883 l32i a3, a2, PT_AREG3; _j 1f; .align 8
884 l32i a3, a2, PT_AREG4; _j 1f; .align 8
885 mov a3, a5; _j 1f; .align 8
886 mov a3, a6; _j 1f; .align 8
887 mov a3, a7; _j 1f; .align 8
888 mov a3, a8; _j 1f; .align 8
889 mov a3, a9; _j 1f; .align 8
890 mov a3, a10; _j 1f; .align 8
891 mov a3, a11; _j 1f; .align 8
892 mov a3, a12; _j 1f; .align 8
893 mov a3, a13; _j 1f; .align 8
894 mov a3, a14; _j 1f; .align 8
895 mov a3, a15; _j 1f; .align 8
896
8971:
898
899#ifdef ALLOCA_EXCEPTION_IN_IRAM
900#error iram not supported
901#else
902 l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction
903#endif
904 addi a4, a4, 3 # step over movsp
905 _EXTUI_MOVSP_DST(a0) # extract destination register
Max Filippovbc5378f2012-10-15 03:55:38 +0400906 wsr a4, epc1 # save new epc_1
Chris Zankel5a0015d2005-06-23 22:01:16 -0700907
908 _bnei a0, 1, 1f # no 'movsp a1, ax': jump
909
Chris Zankelc4c45942012-11-28 16:53:51 -0800910 /* Move the save area. This implies the use of the L32E
Chris Zankel5a0015d2005-06-23 22:01:16 -0700911 * and S32E instructions, because this move must be done with
912 * the user's PS.RING privilege levels, not with ring 0
913 * (kernel's) privileges currently active with PS.EXCM
914 * set. Note that we have stil registered a fixup routine with the
915 * double exception vector in case a double exception occurs.
916 */
917
918 /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
919
920 l32e a0, a1, -16
921 l32e a4, a1, -12
922 s32e a0, a3, -16
923 s32e a4, a3, -12
924 l32e a0, a1, -8
925 l32e a4, a1, -4
926 s32e a0, a3, -8
927 s32e a4, a3, -4
928
929 /* Restore stack-pointer and all the other saved registers. */
930
931 mov a1, a3
932
933 l32i a4, a2, PT_AREG4
934 l32i a3, a2, PT_AREG3
935 l32i a0, a2, PT_AREG0
936 l32i a2, a2, PT_AREG2
937 rfe
938
939 /* MOVSP <at>,<as> was invoked with <at> != a1.
940 * Because the stack pointer is not being modified,
941 * we should be able to just modify the pointer
942 * without moving any save area.
943 * The processor only traps these occurrences if the
944 * caller window isn't live, so unfortunately we can't
945 * use this as an alternate trap mechanism.
946 * So we just do the move. This requires that we
947 * resolve the destination register, not just the source,
948 * so there's some extra work.
949 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
950 */
951
952 /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
953
9541: movi a4, .Lmovsp_dst
955 addx8 a4, a0, a4
956 jx a4
957
958 .align 8
959.Lmovsp_dst:
960 s32i a3, a2, PT_AREG0; _j 1f; .align 8
961 mov a1, a3; _j 1f; .align 8
962 s32i a3, a2, PT_AREG2; _j 1f; .align 8
963 s32i a3, a2, PT_AREG3; _j 1f; .align 8
964 s32i a3, a2, PT_AREG4; _j 1f; .align 8
965 mov a5, a3; _j 1f; .align 8
966 mov a6, a3; _j 1f; .align 8
967 mov a7, a3; _j 1f; .align 8
968 mov a8, a3; _j 1f; .align 8
969 mov a9, a3; _j 1f; .align 8
970 mov a10, a3; _j 1f; .align 8
971 mov a11, a3; _j 1f; .align 8
972 mov a12, a3; _j 1f; .align 8
973 mov a13, a3; _j 1f; .align 8
974 mov a14, a3; _j 1f; .align 8
975 mov a15, a3; _j 1f; .align 8
976
9771: l32i a4, a2, PT_AREG4
978 l32i a3, a2, PT_AREG3
979 l32i a0, a2, PT_AREG0
980 l32i a2, a2, PT_AREG2
981 rfe
982
Chris Zankeld1538c42012-11-16 16:16:20 -0800983ENDPROC(fast_alloca)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700984
985/*
986 * fast system calls.
987 *
988 * WARNING: The kernel doesn't save the entire user context before
989 * handling a fast system call. These functions are small and short,
990 * usually offering some functionality not available to user tasks.
991 *
992 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
993 *
994 * Entry condition:
995 *
996 * a0: trashed, original value saved on stack (PT_AREG0)
997 * a1: a1
998 * a2: new stack pointer, original in DEPC
999 * a3: dispatch table
1000 * depc: a2, original value saved on stack (PT_DEPC)
1001 * excsave_1: a3
1002 */
1003
1004ENTRY(fast_syscall_kernel)
1005
1006 /* Skip syscall. */
1007
Max Filippovbc5378f2012-10-15 03:55:38 +04001008 rsr a0, epc1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001009 addi a0, a0, 3
Max Filippovbc5378f2012-10-15 03:55:38 +04001010 wsr a0, epc1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001011
1012 l32i a0, a2, PT_DEPC
1013 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1014
Max Filippovbc5378f2012-10-15 03:55:38 +04001015 rsr a0, depc # get syscall-nr
Chris Zankel5a0015d2005-06-23 22:01:16 -07001016 _beqz a0, fast_syscall_spill_registers
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001017 _beqi a0, __NR_xtensa, fast_syscall_xtensa
Chris Zankel5a0015d2005-06-23 22:01:16 -07001018
1019 j kernel_exception
1020
Chris Zankeld1538c42012-11-16 16:16:20 -08001021ENDPROC(fast_syscall_kernel)
1022
Chris Zankel5a0015d2005-06-23 22:01:16 -07001023ENTRY(fast_syscall_user)
1024
1025 /* Skip syscall. */
1026
Max Filippovbc5378f2012-10-15 03:55:38 +04001027 rsr a0, epc1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001028 addi a0, a0, 3
Max Filippovbc5378f2012-10-15 03:55:38 +04001029 wsr a0, epc1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001030
1031 l32i a0, a2, PT_DEPC
1032 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1033
Max Filippovbc5378f2012-10-15 03:55:38 +04001034 rsr a0, depc # get syscall-nr
Chris Zankel5a0015d2005-06-23 22:01:16 -07001035 _beqz a0, fast_syscall_spill_registers
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001036 _beqi a0, __NR_xtensa, fast_syscall_xtensa
Chris Zankel5a0015d2005-06-23 22:01:16 -07001037
1038 j user_exception
1039
Chris Zankeld1538c42012-11-16 16:16:20 -08001040ENDPROC(fast_syscall_user)
1041
Chris Zankel5a0015d2005-06-23 22:01:16 -07001042ENTRY(fast_syscall_unrecoverable)
1043
Chris Zankelc4c45942012-11-28 16:53:51 -08001044 /* Restore all states. */
Chris Zankel5a0015d2005-06-23 22:01:16 -07001045
Chris Zankelc4c45942012-11-28 16:53:51 -08001046 l32i a0, a2, PT_AREG0 # restore a0
1047 xsr a2, depc # restore a2, depc
1048 rsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001049
Chris Zankelc4c45942012-11-28 16:53:51 -08001050 wsr a0, excsave1
1051 movi a0, unrecoverable_exception
1052 callx0 a0
Chris Zankel5a0015d2005-06-23 22:01:16 -07001053
Chris Zankeld1538c42012-11-16 16:16:20 -08001054ENDPROC(fast_syscall_unrecoverable)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001055
1056/*
1057 * sysxtensa syscall handler
1058 *
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001059 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
1060 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
1061 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
1062 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
1063 * a2 a6 a3 a4 a5
Chris Zankel5a0015d2005-06-23 22:01:16 -07001064 *
1065 * Entry condition:
1066 *
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001067 * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001068 * a1: a1
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001069 * a2: new stack pointer, original in a0 and DEPC
1070 * a3: dispatch table, original in excsave_1
1071 * a4..a15: unchanged
Chris Zankel5a0015d2005-06-23 22:01:16 -07001072 * depc: a2, original value saved on stack (PT_DEPC)
1073 * excsave_1: a3
1074 *
1075 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1076 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1077 *
1078 * Note: we don't have to save a2; a2 holds the return value
1079 *
1080 * We use the two macros TRY and CATCH:
1081 *
1082 * TRY adds an entry to the __ex_table fixup table for the immediately
1083 * following instruction.
1084 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001085 * CATCH catches any exception that occurred at one of the preceding TRY
Chris Zankel5a0015d2005-06-23 22:01:16 -07001086 * statements and continues from there
1087 *
1088 * Usage TRY l32i a0, a1, 0
1089 * <other code>
1090 * done: rfe
1091 * CATCH <set return code>
1092 * j done
1093 */
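
/* Roughly, the operations behave like this C sketch (illustrative; every
 * memory access in the real code goes through the TRY/CATCH fixups described
 * above, so a faulting pointer returns -EFAULT and an unknown op -EINVAL):
 *
 *	old = *ptr;
 *	switch (op) {
 *	case SYS_XTENSA_ATOMIC_CMP_SWP:
 *		if (old != oldval)
 *			return 0;
 *		*ptr = newval;
 *		return 1;
 *	case SYS_XTENSA_ATOMIC_SET:
 *		*ptr = val;
 *		return old;
 *	default:		// SYS_XTENSA_ATOMIC_ADD / _EXG_ADD
 *		*ptr = old + val;
 *		return old;
 *	}
 */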
1094
1095#define TRY \
1096 .section __ex_table, "a"; \
1097 .word 66f, 67f; \
1098 .text; \
109966:
1100
1101#define CATCH \
110267:
1103
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001104ENTRY(fast_syscall_xtensa)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001105
Max Filippovbc5378f2012-10-15 03:55:38 +04001106 xsr a3, excsave1 # restore a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001107
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001108 s32i a7, a2, PT_AREG7 # we need an additional register
Chris Zankel5a0015d2005-06-23 22:01:16 -07001109 movi a7, 4 # sizeof(unsigned int)
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001110 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
Chris Zankel5a0015d2005-06-23 22:01:16 -07001111
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001112 addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
1113 _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
1114 _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
Chris Zankel5a0015d2005-06-23 22:01:16 -07001115
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001116 /* Fall through for ATOMIC_CMP_SWP. */
Chris Zankel5a0015d2005-06-23 22:01:16 -07001117
1118.Lswp: /* Atomic compare and swap */
1119
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001120TRY l32i a0, a3, 0 # read old value
1121 bne a0, a4, 1f # same as old value? jump
1122TRY s32i a5, a3, 0 # different, modify value
1123 l32i a7, a2, PT_AREG7 # restore a7
1124 l32i a0, a2, PT_AREG0 # restore a0
1125 movi a2, 1 # and return 1
1126 addi a6, a6, 1 # restore a6 (really necessary?)
1127 rfe
Chris Zankel5a0015d2005-06-23 22:01:16 -07001128
Chris Zankelfc4fb2a2006-12-10 02:18:52 -080011291: l32i a7, a2, PT_AREG7 # restore a7
1130 l32i a0, a2, PT_AREG0 # restore a0
1131 movi a2, 0 # return 0 (note that we cannot set
1132 addi a6, a6, 1 # restore a6 (really necessary?)
1133 rfe
Chris Zankel5a0015d2005-06-23 22:01:16 -07001134
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001135.Lnswp: /* Atomic set, add, and exg_add. */
Chris Zankel5a0015d2005-06-23 22:01:16 -07001136
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001137TRY l32i a7, a3, 0 # orig
1138 add a0, a4, a7 # + arg
1139 moveqz a0, a4, a6 # set
1140TRY s32i a0, a3, 0 # write new value
Chris Zankel5a0015d2005-06-23 22:01:16 -07001141
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001142 mov a0, a2
Chris Zankel5a0015d2005-06-23 22:01:16 -07001143 mov a2, a7
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001144 l32i a7, a0, PT_AREG7 # restore a7
1145 l32i a0, a0, PT_AREG0 # restore a0
1146 addi a6, a6, 1 # restore a6 (really necessary?)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001147 rfe
1148
1149CATCH
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001150.Leac: l32i a7, a2, PT_AREG7 # restore a7
1151 l32i a0, a2, PT_AREG0 # restore a0
1152 movi a2, -EFAULT
1153 rfe
1154
1155.Lill: l32i a7, a2, PT_AREG7 # restore a7
1156 l32i a0, a2, PT_AREG0 # restore a0
1157 movi a2, -EINVAL
1158 rfe
1159
Chris Zankeld1538c42012-11-16 16:16:20 -08001160ENDPROC(fast_syscall_xtensa)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001161
1162
1163/* fast_syscall_spill_registers.
1164 *
1165 * Entry condition:
1166 *
1167 * a0: trashed, original value saved on stack (PT_AREG0)
1168 * a1: a1
1169 * a2: new stack pointer, original in DEPC
1170 * a3: dispatch table
1171 * depc: a2, original value saved on stack (PT_DEPC)
1172 * excsave_1: a3
1173 *
1174 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
Chris Zankel5a0015d2005-06-23 22:01:16 -07001175 */
1176
1177ENTRY(fast_syscall_spill_registers)
1178
1179 /* Register a FIXUP handler (pass current wb as a parameter) */
1180
1181 movi a0, fast_syscall_spill_registers_fixup
1182 s32i a0, a3, EXC_TABLE_FIXUP
Max Filippovbc5378f2012-10-15 03:55:38 +04001183 rsr a0, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -07001184 s32i a0, a3, EXC_TABLE_PARAM
1185
1186 /* Save a3 and SAR on stack. */
1187
Max Filippovbc5378f2012-10-15 03:55:38 +04001188 rsr a0, sar
1189 xsr a3, excsave1 # restore a3 and excsave_1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001190 s32i a3, a2, PT_AREG3
Chris Zankelc658eac2008-02-12 13:17:07 -08001191 s32i a4, a2, PT_AREG4
1192 s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
Chris Zankel5a0015d2005-06-23 22:01:16 -07001193
1194 /* The spill routine might clobber a7, a11, and a15. */
1195
Chris Zankelc658eac2008-02-12 13:17:07 -08001196 s32i a7, a2, PT_AREG7
1197 s32i a11, a2, PT_AREG11
1198 s32i a15, a2, PT_AREG15
Chris Zankel5a0015d2005-06-23 22:01:16 -07001199
Chris Zankelc658eac2008-02-12 13:17:07 -08001200 call0 _spill_registers # destroys a3, a4, and SAR
Chris Zankel5a0015d2005-06-23 22:01:16 -07001201
1202 /* Advance PC, restore registers and SAR, and return from exception. */
1203
Chris Zankelc658eac2008-02-12 13:17:07 -08001204 l32i a3, a2, PT_AREG5
1205 l32i a4, a2, PT_AREG4
Chris Zankel5a0015d2005-06-23 22:01:16 -07001206 l32i a0, a2, PT_AREG0
Max Filippovbc5378f2012-10-15 03:55:38 +04001207 wsr a3, sar
Chris Zankel5a0015d2005-06-23 22:01:16 -07001208 l32i a3, a2, PT_AREG3
1209
1210 /* Restore clobbered registers. */
1211
Chris Zankelc658eac2008-02-12 13:17:07 -08001212 l32i a7, a2, PT_AREG7
1213 l32i a11, a2, PT_AREG11
1214 l32i a15, a2, PT_AREG15
Chris Zankel5a0015d2005-06-23 22:01:16 -07001215
1216 movi a2, 0
1217 rfe
1218
Chris Zankeld1538c42012-11-16 16:16:20 -08001219ENDPROC(fast_syscall_spill_registers)
1220
Chris Zankel5a0015d2005-06-23 22:01:16 -07001221/* Fixup handler.
1222 *
1223 * We get here if the spill routine causes an exception, e.g. tlb miss.
1224 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
1225 * we entered the spill routine and jump to the user exception handler.
1226 *
1227 * a0: value of depc, original value in depc
1228 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1229 * a3: exctable, original value in excsave1
1230 */
1231
1232fast_syscall_spill_registers_fixup:
1233
Max Filippovbc5378f2012-10-15 03:55:38 +04001234 rsr a2, windowbase # get current windowbase (a2 is saved)
1235 xsr a0, depc # restore depc and a0
Chris Zankel5a0015d2005-06-23 22:01:16 -07001236 ssl a2 # set shift (32 - WB)
1237
1238 /* We need to make sure the current registers (a0-a3) are preserved.
1239 * To do this, we simply set the bit for the current window frame
1240 * in WS, so that the exception handlers save them to the task stack.
1241 */
1242
Max Filippovbc5378f2012-10-15 03:55:38 +04001243 rsr a3, excsave1 # get spill-mask
Chris Zankel5a0015d2005-06-23 22:01:16 -07001244 slli a2, a3, 1 # shift left by one
1245
1246 slli a3, a2, 32-WSBITS
1247 src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy......
Max Filippovbc5378f2012-10-15 03:55:38 +04001248 wsr a2, windowstart # set corrected windowstart
Chris Zankel5a0015d2005-06-23 22:01:16 -07001249
1250 movi a3, exc_table
1251 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
1252 l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task)
1253
1254 /* Return to the original (user task) WINDOWBASE.
1255 * We leave the following frame behind:
1256 * a0, a1, a2 same
1257 * a3: trashed (saved in excsave_1)
1258 * depc: depc (we have to return to that address)
1259 * excsave_1: a3
1260 */
1261
Max Filippovbc5378f2012-10-15 03:55:38 +04001262 wsr a3, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -07001263 rsync
1264
1265 /* We are now in the original frame when we entered _spill_registers:
1266 * a0: return address
1267 * a1: used, stack pointer
1268 * a2: kernel stack pointer
1269 * a3: available, saved in EXCSAVE_1
1270 * depc: exception address
1271 * excsave: a3
1272 * Note: This frame might be the same as above.
1273 */
1274
Chris Zankel5a0015d2005-06-23 22:01:16 -07001275 /* Setup stack pointer. */
1276
1277 addi a2, a2, -PT_USER_SIZE
1278 s32i a0, a2, PT_AREG0
1279
1280 /* Make sure we return to this fixup handler. */
1281
1282 movi a3, fast_syscall_spill_registers_fixup_return
1283 s32i a3, a2, PT_DEPC # setup depc
1284
1285 /* Jump to the exception handler. */
1286
1287 movi a3, exc_table
Max Filippovbc5378f2012-10-15 03:55:38 +04001288 rsr a0, exccause
Chris Zankelc4c45942012-11-28 16:53:51 -08001289 addx4 a0, a0, a3 # find entry in table
1290 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1291 jx a0
Chris Zankel5a0015d2005-06-23 22:01:16 -07001292
1293fast_syscall_spill_registers_fixup_return:
1294
1295 /* When we return here, all registers have been restored (a2: DEPC) */
1296
Max Filippovbc5378f2012-10-15 03:55:38 +04001297 wsr a2, depc # exception address
Chris Zankel5a0015d2005-06-23 22:01:16 -07001298
1299 /* Restore fixup handler. */
1300
Max Filippovbc5378f2012-10-15 03:55:38 +04001301 xsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001302 movi a2, fast_syscall_spill_registers_fixup
1303 s32i a2, a3, EXC_TABLE_FIXUP
Max Filippovbc5378f2012-10-15 03:55:38 +04001304 rsr a2, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -07001305 s32i a2, a3, EXC_TABLE_PARAM
1306 l32i a2, a3, EXC_TABLE_KSTK
1307
Chris Zankel5a0015d2005-06-23 22:01:16 -07001308 /* Load WB at the time the exception occurred. */
1309
Max Filippovbc5378f2012-10-15 03:55:38 +04001310 rsr a3, sar # WB is still in SAR
Chris Zankel5a0015d2005-06-23 22:01:16 -07001311 neg a3, a3
Max Filippovbc5378f2012-10-15 03:55:38 +04001312 wsr a3, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -07001313 rsync
1314
1315 /* Restore a3 and return. */
1316
1317 movi a3, exc_table
Max Filippovbc5378f2012-10-15 03:55:38 +04001318 xsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001319
1320 rfde
1321
1322
1323/*
1324 * spill all registers.
1325 *
1326 * This is not a real function. The following conditions must be met:
1327 *
1328 * - must be called with call0.
Chris Zankelc658eac2008-02-12 13:17:07 -08001329 * - uses a3, a4 and SAR.
Chris Zankel5a0015d2005-06-23 22:01:16 -07001330 * - the last 'valid' register of each frame are clobbered.
1331 * - the caller must have registered a fixup handler
1332 * (or be inside a critical section)
1333 * - PS_EXCM must be set (PS_WOE cleared?)
1334 */
1335
1336ENTRY(_spill_registers)
1337
1338 /*
1339 * Rotate ws so that the current windowbase is at bit 0.
1340 * Assume ws = xxxwww1yy (www1 current window frame).
Chris Zankelc658eac2008-02-12 13:17:07 -08001341 * Rotate ws right so that a4 = yyxxxwww1.
Chris Zankel5a0015d2005-06-23 22:01:16 -07001342 */
1343
Max Filippovbc5378f2012-10-15 03:55:38 +04001344 rsr a4, windowbase
1345 rsr a3, windowstart # a3 = xxxwww1yy
Chris Zankelc658eac2008-02-12 13:17:07 -08001346 ssr a4 # holds WB
1347 slli a4, a3, WSBITS
1348 or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
Chris Zankelea0b6b02008-01-09 09:22:36 -08001349 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001350
1351 /* We are done if there is no frame other than the current register frame. */
1352
Chris Zankel50c07162007-11-14 13:47:02 -08001353 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
Chris Zankelc658eac2008-02-12 13:17:07 -08001354 movi a4, (1 << (WSBITS-1))
Chris Zankel5a0015d2005-06-23 22:01:16 -07001355 _beqz a3, .Lnospill # only one active frame? jump
1356
1357 /* We want 1 at the top, so that we return to the current windowbase */
1358
Chris Zankelc658eac2008-02-12 13:17:07 -08001359 or a3, a3, a4 # 1yyxxxwww
Chris Zankel5a0015d2005-06-23 22:01:16 -07001360
1361 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1362
Max Filippovbc5378f2012-10-15 03:55:38 +04001363 wsr a3, windowstart # save shifted windowstart
Chris Zankelc658eac2008-02-12 13:17:07 -08001364 neg a4, a3
1365 and a3, a4, a3 # first bit set from right: 000010000
Chris Zankel5a0015d2005-06-23 22:01:16 -07001366
Chris Zankelc658eac2008-02-12 13:17:07 -08001367 ffs_ws a4, a3 # a4: shifts to skip empty frames
Chris Zankel5a0015d2005-06-23 22:01:16 -07001368 movi a3, WSBITS
Chris Zankelc658eac2008-02-12 13:17:07 -08001369 sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
1370 ssr a4 # save in SAR for later.
Chris Zankel5a0015d2005-06-23 22:01:16 -07001371
Max Filippovbc5378f2012-10-15 03:55:38 +04001372 rsr a3, windowbase
Chris Zankelc658eac2008-02-12 13:17:07 -08001373 add a3, a3, a4
Max Filippovbc5378f2012-10-15 03:55:38 +04001374 wsr a3, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -07001375 rsync
1376
Max Filippovbc5378f2012-10-15 03:55:38 +04001377 rsr a3, windowstart
Chris Zankel5a0015d2005-06-23 22:01:16 -07001378 srl a3, a3 # shift windowstart
1379
1380 /* WB is now just one frame below the oldest frame in the register
1381 window. WS is shifted so the oldest frame is in bit 0, thus, WB
1382 and WS differ by one 4-register frame. */
1383
1384 /* Save frames. Depending on what call was used (call4, call8, call12),
1385 * we have to save 4, 8, or 12 registers.
1386 */
1387
1388 _bbsi.l a3, 1, .Lc4
1389 _bbsi.l a3, 2, .Lc8
1390
1391 /* Special case: we have a call12-frame starting at a4. */
1392
1393 _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
1394
1395 s32e a4, a1, -16 # a1 is valid with an empty spill area
1396 l32e a4, a5, -12
1397 s32e a8, a4, -48
1398 mov a8, a4
1399 l32e a4, a1, -16
1400 j .Lc12c
1401
Chris Zankel50c07162007-11-14 13:47:02 -08001402.Lnospill:
Chris Zankelea0b6b02008-01-09 09:22:36 -08001403 ret
Chris Zankel50c07162007-11-14 13:47:02 -08001404
Chris Zankel5a0015d2005-06-23 22:01:16 -07001405.Lloop: _bbsi.l a3, 1, .Lc4
1406 _bbci.l a3, 2, .Lc12
1407
1408.Lc8: s32e a4, a13, -16
1409 l32e a4, a5, -12
1410 s32e a8, a4, -32
1411 s32e a5, a13, -12
1412 s32e a6, a13, -8
1413 s32e a7, a13, -4
1414 s32e a9, a4, -28
1415 s32e a10, a4, -24
1416 s32e a11, a4, -20
1417
1418 srli a11, a3, 2 # shift windowbase by 2
1419 rotw 2
1420 _bnei a3, 1, .Lloop
1421
1422.Lexit: /* Done. Do the final rotation, set WS, and return. */
1423
1424 rotw 1
Max Filippovbc5378f2012-10-15 03:55:38 +04001425 rsr a3, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -07001426 ssl a3
1427 movi a3, 1
1428 sll a3, a3
Max Filippovbc5378f2012-10-15 03:55:38 +04001429 wsr a3, windowstart
Chris Zankelea0b6b02008-01-09 09:22:36 -08001430 ret
Chris Zankel5a0015d2005-06-23 22:01:16 -07001431
1432.Lc4: s32e a4, a9, -16
1433 s32e a5, a9, -12
1434 s32e a6, a9, -8
1435 s32e a7, a9, -4
1436
1437 srli a7, a3, 1
1438 rotw 1
1439 _bnei a3, 1, .Lloop
1440 j .Lexit
1441
1442.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 3 shouldn't be zero!
1443
1444 /* 12-register frame (call12) */
1445
1446 l32e a2, a5, -12
1447 s32e a8, a2, -48
1448 mov a8, a2
1449
1450.Lc12c: s32e a9, a8, -44
1451 s32e a10, a8, -40
1452 s32e a11, a8, -36
1453 s32e a12, a8, -32
1454 s32e a13, a8, -28
1455 s32e a14, a8, -24
1456 s32e a15, a8, -20
1457 srli a15, a3, 3
1458
1459 /* The stack pointer for a4..a7 is out of reach, so we rotate the
1460 * window, grab the stackpointer, and rotate back.
1461 * Alternatively, we could also use the following approach, but that
1462 * makes the fixup routine much more complicated:
1463 * rotw 1
1464 * s32e a0, a13, -16
1465 * ...
1466 * rotw 2
1467 */
1468
1469 rotw 1
1470 mov a5, a13
1471 rotw -1
1472
1473 s32e a4, a9, -16
1474 s32e a5, a9, -12
1475 s32e a6, a9, -8
1476 s32e a7, a9, -4
1477
1478 rotw 3
1479
1480 _beqi a3, 1, .Lexit
1481 j .Lloop
1482
1483.Linvalid_mask:
1484
1485 /* We get here because of an unrecoverable error in the window
1486 * registers. If we are in user space, we kill the application;
1487 * however, this condition is unrecoverable in kernel space.
1488 */
1489
Max Filippovbc5378f2012-10-15 03:55:38 +04001490 rsr a0, ps
Chris Zankel173d6682006-12-10 02:18:48 -08001491 _bbci.l a0, PS_UM_BIT, 1f
Chris Zankel5a0015d2005-06-23 22:01:16 -07001492
Chris Zankelc4c45942012-11-28 16:53:51 -08001493 /* User space: Setup a dummy frame and kill application.
Chris Zankel5a0015d2005-06-23 22:01:16 -07001494 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1495 */
1496
1497 movi a0, 1
1498 movi a1, 0
1499
Max Filippovbc5378f2012-10-15 03:55:38 +04001500 wsr a0, windowstart
1501 wsr a1, windowbase
Chris Zankel5a0015d2005-06-23 22:01:16 -07001502 rsync
1503
1504 movi a0, 0
1505
1506 movi a3, exc_table
1507 l32i a1, a3, EXC_TABLE_KSTK
Max Filippovbc5378f2012-10-15 03:55:38 +04001508 wsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001509
Marc Gauthier2d1c6452013-01-05 04:57:17 +04001510 movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
Max Filippovbc5378f2012-10-15 03:55:38 +04001511 wsr a4, ps
Chris Zankel5a0015d2005-06-23 22:01:16 -07001512 rsync
1513
1514 movi a6, SIGSEGV
1515 movi a4, do_exit
1516 callx4 a4
1517
15181: /* Kernel space: PANIC! */
1519
Max Filippovbc5378f2012-10-15 03:55:38 +04001520 wsr a0, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001521 movi a0, unrecoverable_exception
1522 callx0 a0 # should not return
15231: j 1b
1524
Chris Zankeld1538c42012-11-16 16:16:20 -08001525ENDPROC(_spill_registers)
1526
Johannes Weinere5083a62009-03-04 16:21:31 +01001527#ifdef CONFIG_MMU
Chris Zankel5a0015d2005-06-23 22:01:16 -07001528/*
1529 * We should never get here. Bail out!
1530 */
1531
1532ENTRY(fast_second_level_miss_double_kernel)
1533
15341: movi a0, unrecoverable_exception
1535 callx0 a0 # should not return
15361: j 1b
1537
Chris Zankeld1538c42012-11-16 16:16:20 -08001538ENDPROC(fast_second_level_miss_double_kernel)
1539
Chris Zankel5a0015d2005-06-23 22:01:16 -07001540/* First-level entry handler for user, kernel, and double 2nd-level
1541 * TLB miss exceptions. Note that for now, user and kernel miss
1542 * exceptions share the same entry point and are handled identically.
1543 *
1544 * An old, less-efficient C version of this function used to exist.
1545 * We include it below, interleaved as comments, for reference.
1546 *
1547 * Entry condition:
1548 *
1549 * a0: trashed, original value saved on stack (PT_AREG0)
1550 * a1: a1
1551 * a2: new stack pointer, original in DEPC
1552 * a3: dispatch table
1553 * depc: a2, original value saved on stack (PT_DEPC)
1554 * excsave_1: a3
1555 *
1556 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1557 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1558 */
1559
1560ENTRY(fast_second_level_miss)
1561
1562 /* Save a1. Note: we don't expect a double exception. */
1563
1564 s32i a1, a2, PT_AREG1
1565
1566 /* We need to map the page of PTEs for the user task. Find
1567 * the pointer to that page. Also, it's possible for tsk->mm
1568 * to be NULL while tsk->active_mm is nonzero if we faulted on
1569 * a vmalloc address. In that rare case, we must use
1570 * active_mm instead to avoid a fault in this handler. See
1571 *
1572 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
1573 * (or search Internet on "mm vs. active_mm")
1574 *
1575 * if (!mm)
1576 * mm = tsk->active_mm;
1577 * pgd = pgd_offset (mm, regs->excvaddr);
1578 * pmd = pmd_offset (pgd, regs->excvaddr);
1579 * pmdval = *pmd;
1580 */
1581
1582 GET_CURRENT(a1,a2)
1583 l32i a0, a1, TASK_MM # tsk->mm
1584 beqz a0, 9f
1585
Chris Zankel01858d12007-08-06 23:57:57 -07001586
1587 /* We deliberately destroy a3 that holds the exception table. */
1588
Max Filippovbc5378f2012-10-15 03:55:38 +040015898: rsr a3, excvaddr # fault address
Chris Zankel01858d12007-08-06 23:57:57 -07001590 _PGD_OFFSET(a0, a3, a1)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001591 l32i a0, a0, 0 # read pmdval
Chris Zankel5a0015d2005-06-23 22:01:16 -07001592 beqz a0, 2f
1593
1594 /* Read ptevaddr and convert to top of page-table page.
1595 *
1596 * vpnval = read_ptevaddr_register() & PAGE_MASK;
1597 * vpnval += DTLB_WAY_PGTABLE;
1598 * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
1599 * write_dtlb_entry (pteval, vpnval);
1600 *
1601 * The messy computation for 'pteval' above really simplifies
1602 * into the following:
1603 *
Chris Zankel66569202007-08-22 10:14:51 -07001604 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
Chris Zankel5a0015d2005-06-23 22:01:16 -07001605 */
1606
Chris Zankel39070cb2012-10-17 23:08:20 -07001607 movi a1, (-PAGE_OFFSET) & 0xffffffff
Chris Zankel5a0015d2005-06-23 22:01:16 -07001608 add a0, a0, a1 # pmdval - PAGE_OFFSET
1609 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
1610 xor a0, a0, a1
1611
Chris Zankel01858d12007-08-06 23:57:57 -07001612 movi a1, _PAGE_DIRECTORY
Chris Zankel5a0015d2005-06-23 22:01:16 -07001613 or a0, a0, a1 # ... | PAGE_DIRECTORY
1614
Chris Zankel01858d12007-08-06 23:57:57 -07001615 /*
Chris Zankel66569202007-08-22 10:14:51 -07001616 * We utilize all three wired-ways (7-9) to hold pmd translations.
Chris Zankel01858d12007-08-06 23:57:57 -07001617 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
 1618	 * This allows mapping the three most common regions to three different
 1619	 * DTLB ways:
 1620	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
 1621	 *  2   -> way 8	shared libraries (2000.0000)
 1622	 *  3   -> way 9	stack (3000.0000)
1623 */
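	/* The way selection is pure arithmetic; as a stand-alone C sketch
	 * (assuming DTLB_WAY_PGD == 7, the first of the wired ways):
	 *
	 *	unsigned way_for(unsigned long vaddr)
	 *	{
	 *		unsigned r = (vaddr >> 28) & 3;	// address bits 28..29
	 *		return 7 + ((r * 3) >> 2);	// 0,1 -> 7;  2 -> 8;  3 -> 9
	 *	}
	 *
	 * which matches the addx2/extui sequence below: r*3 is 0,3,6,9, and its
	 * bits 2..3 give 0,0,1,2.
	 */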
Chris Zankel5a0015d2005-06-23 22:01:16 -07001624
Chris Zankel01858d12007-08-06 23:57:57 -07001625 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
Max Filippovbc5378f2012-10-15 03:55:38 +04001626 rsr a1, ptevaddr
Chris Zankel01858d12007-08-06 23:57:57 -07001627 addx2 a3, a3, a3 # -> 0,3,6,9
1628 srli a1, a1, PAGE_SHIFT
1629 extui a3, a3, 2, 2 # -> 0,0,1,2
1630 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
1631 addi a3, a3, DTLB_WAY_PGD
1632 add a1, a1, a3 # ... + way_number
1633
16343: wdtlb a0, a1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001635 dsync
1636
1637 /* Exit critical section. */
1638
Chris Zankel01858d12007-08-06 23:57:57 -070016394: movi a3, exc_table # restore a3
Chris Zankel5a0015d2005-06-23 22:01:16 -07001640 movi a0, 0
1641 s32i a0, a3, EXC_TABLE_FIXUP
1642
1643 /* Restore the working registers, and return. */
1644
1645 l32i a0, a2, PT_AREG0
1646 l32i a1, a2, PT_AREG1
1647 l32i a2, a2, PT_DEPC
Max Filippovbc5378f2012-10-15 03:55:38 +04001648 xsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001649
1650 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1651
1652 /* Restore excsave1 and return. */
1653
Max Filippovbc5378f2012-10-15 03:55:38 +04001654 rsr a2, depc
Chris Zankel5a0015d2005-06-23 22:01:16 -07001655 rfe
1656
1657 /* Return from double exception. */
1658
Max Filippovbc5378f2012-10-15 03:55:38 +040016591: xsr a2, depc
Chris Zankel5a0015d2005-06-23 22:01:16 -07001660 esync
1661 rfde
1662
16639: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
1664 j 8b
1665
Chris Zankel66569202007-08-22 10:14:51 -07001666#if (DCACHE_WAY_SIZE > PAGE_SIZE)
1667
16682: /* Special case for cache aliasing.
 1669	 * We (should) only get here if clear_user_page, copy_user_page,
 1670	 * or one of the aliased cache flush functions was preempted by
 1671	 * another task. Re-establish the temporary mapping to the
 1672	 * TLBTEMP_BASE areas.
1673 */
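	/* The guards below, as a C sketch (reference only; "invalid_pgd" is
	 * label 2 further down):
	 *
	 *	if (!in_double_exception &&
	 *	    epc1 >= __tlbtemp_mapping_start && epc1 < __tlbtemp_mapping_end &&
	 *	    excvaddr >= TLBTEMP_BASE_1 &&
	 *	    excvaddr <  TLBTEMP_BASE_1 +
	 *			(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)))
	 *		rewrite the temporary ITLB or DTLB entry (PPN still in a6/a7);
	 *	else
	 *		goto invalid_pgd;
	 */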
1674
1675 /* We shouldn't be in a double exception */
1676
1677 l32i a0, a2, PT_DEPC
1678 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
1679
1680 /* Make sure the exception originated in the special functions */
1681
1682 movi a0, __tlbtemp_mapping_start
Max Filippovbc5378f2012-10-15 03:55:38 +04001683 rsr a3, epc1
Chris Zankel66569202007-08-22 10:14:51 -07001684 bltu a3, a0, 2f
1685 movi a0, __tlbtemp_mapping_end
1686 bgeu a3, a0, 2f
1687
1688 /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
1689
1690 movi a3, TLBTEMP_BASE_1
Max Filippovbc5378f2012-10-15 03:55:38 +04001691 rsr a0, excvaddr
Chris Zankel66569202007-08-22 10:14:51 -07001692 bltu a0, a3, 2f
1693
1694 addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
1695 bgeu a1, a3, 2f
1696
1697 /* Check if we have to restore an ITLB mapping. */
1698
1699 movi a1, __tlbtemp_mapping_itlb
Max Filippovbc5378f2012-10-15 03:55:38 +04001700 rsr a3, epc1
Chris Zankel66569202007-08-22 10:14:51 -07001701 sub a3, a3, a1
1702
1703 /* Calculate VPN */
1704
1705 movi a1, PAGE_MASK
1706 and a1, a1, a0
1707
1708 /* Jump for ITLB entry */
1709
1710 bgez a3, 1f
1711
1712 /* We can use up to two TLBTEMP areas, one for src and one for dst. */
1713
1714 extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
1715 add a1, a3, a1
1716
1717 /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
1718
1719 mov a0, a6
1720 movnez a0, a7, a3
1721 j 3b
1722
1723 /* ITLB entry. We only use dst in a6. */
1724
17251: witlb a6, a1
1726 isync
1727 j 4b
1728
1729
1730#endif // DCACHE_WAY_SIZE > PAGE_SIZE
1731
1732
Chris Zankel5a0015d2005-06-23 22:01:16 -070017332: /* Invalid PGD, default exception handling */
1734
Chris Zankel01858d12007-08-06 23:57:57 -07001735 movi a3, exc_table
Max Filippovbc5378f2012-10-15 03:55:38 +04001736 rsr a1, depc
1737 xsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001738 s32i a1, a2, PT_AREG2
1739 s32i a3, a2, PT_AREG3
1740 mov a1, a2
1741
Max Filippovbc5378f2012-10-15 03:55:38 +04001742 rsr a2, ps
Chris Zankel173d6682006-12-10 02:18:48 -08001743 bbsi.l a2, PS_UM_BIT, 1f
Chris Zankel5a0015d2005-06-23 22:01:16 -07001744 j _kernel_exception
17451: j _user_exception
1746
Chris Zankeld1538c42012-11-16 16:16:20 -08001747ENDPROC(fast_second_level_miss)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001748
1749/*
1750 * StoreProhibitedException
1751 *
 1752 * Update the pte and the dtlb mapping for this pte.
1753 *
1754 * Entry condition:
1755 *
1756 * a0: trashed, original value saved on stack (PT_AREG0)
1757 * a1: a1
1758 * a2: new stack pointer, original in DEPC
1759 * a3: dispatch table
1760 * depc: a2, original value saved on stack (PT_DEPC)
1761 * excsave_1: a3
1762 *
1763 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1764 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1765 */
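/* In C, the fast path below amounts to roughly the following sketch
 * (pte_ptr and rewrite_dtlb_entry are descriptive names, not real kernel
 * helpers; the two-level walk is done with the _PGD_OFFSET/_PTE_OFFSET
 * macros):
 *
 *	pte = *pte_ptr;
 *	if (pte & (1 << _PAGE_WRITABLE_BIT)) {
 *		pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *		*pte_ptr = pte;
 *		// with page coloring: write back the dirty cache line first
 *		rewrite_dtlb_entry(pte, excvaddr);
 *	} else {
 *		// not present or not writable: handle the fault in C (label 2)
 *	}
 */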
1766
1767ENTRY(fast_store_prohibited)
1768
1769 /* Save a1 and a4. */
1770
1771 s32i a1, a2, PT_AREG1
1772 s32i a4, a2, PT_AREG4
1773
1774 GET_CURRENT(a1,a2)
1775 l32i a0, a1, TASK_MM # tsk->mm
1776 beqz a0, 9f
1777
Max Filippovbc5378f2012-10-15 03:55:38 +040017788: rsr a1, excvaddr # fault address
Chris Zankel5a0015d2005-06-23 22:01:16 -07001779 _PGD_OFFSET(a0, a1, a4)
1780 l32i a0, a0, 0
Chris Zankel5a0015d2005-06-23 22:01:16 -07001781 beqz a0, 2f
1782
Chris Zankel01858d12007-08-06 23:57:57 -07001783	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid. */
1784
Chris Zankel5a0015d2005-06-23 22:01:16 -07001785 _PTE_OFFSET(a0, a1, a4)
1786 l32i a4, a0, 0 # read pteval
Chris Zankel01858d12007-08-06 23:57:57 -07001787 bbci.l a4, _PAGE_WRITABLE_BIT, 2f
Chris Zankel5a0015d2005-06-23 22:01:16 -07001788
Chris Zankel01858d12007-08-06 23:57:57 -07001789 movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
Chris Zankel5a0015d2005-06-23 22:01:16 -07001790 or a4, a4, a1
Max Filippovbc5378f2012-10-15 03:55:38 +04001791 rsr a1, excvaddr
Chris Zankel5a0015d2005-06-23 22:01:16 -07001792 s32i a4, a0, 0
1793
1794 /* We need to flush the cache if we have page coloring. */
1795#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1796 dhwb a0, 0
1797#endif
1798 pdtlb a0, a1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001799 wdtlb a4, a0
Chris Zankel5a0015d2005-06-23 22:01:16 -07001800
1801 /* Exit critical section. */
1802
1803 movi a0, 0
1804 s32i a0, a3, EXC_TABLE_FIXUP
1805
1806 /* Restore the working registers, and return. */
1807
1808 l32i a4, a2, PT_AREG4
1809 l32i a1, a2, PT_AREG1
1810 l32i a0, a2, PT_AREG0
1811 l32i a2, a2, PT_DEPC
1812
1813 /* Restore excsave1 and a3. */
1814
Max Filippovbc5378f2012-10-15 03:55:38 +04001815 xsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001816 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1817
Max Filippovbc5378f2012-10-15 03:55:38 +04001818 rsr a2, depc
Chris Zankel5a0015d2005-06-23 22:01:16 -07001819 rfe
1820
1821 /* Double exception. Restore FIXUP handler and return. */
1822
Max Filippovbc5378f2012-10-15 03:55:38 +040018231: xsr a2, depc
Chris Zankel5a0015d2005-06-23 22:01:16 -07001824 esync
1825 rfde
1826
18279: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
1828 j 8b
1829
18302: /* If there was a problem, handle fault in C */
1831
Max Filippovbc5378f2012-10-15 03:55:38 +04001832 rsr a4, depc # still holds a2
1833 xsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001834 s32i a4, a2, PT_AREG2
1835 s32i a3, a2, PT_AREG3
1836 l32i a4, a2, PT_AREG4
1837 mov a1, a2
1838
Max Filippovbc5378f2012-10-15 03:55:38 +04001839 rsr a2, ps
Chris Zankel173d6682006-12-10 02:18:48 -08001840 bbsi.l a2, PS_UM_BIT, 1f
Chris Zankel5a0015d2005-06-23 22:01:16 -07001841 j _kernel_exception
18421: j _user_exception
Chris Zankeld1538c42012-11-16 16:16:20 -08001843
1844ENDPROC(fast_store_prohibited)
1845
Johannes Weinere5083a62009-03-04 16:21:31 +01001846#endif /* CONFIG_MMU */
Chris Zankel5a0015d2005-06-23 22:01:16 -07001847
Chris Zankel5a0015d2005-06-23 22:01:16 -07001848/*
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001849 * System Calls.
1850 *
1851 * void system_call (struct pt_regs* regs, int exccause)
1852 * a2 a3
1853 */
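/* Equivalent C, for reference (syscall_fn_t is an illustrative typedef; a
 * pointer to pt_regs is additionally passed on the stack as a seventh
 * argument):
 *
 *	nr = regs->areg[2];
 *	regs->syscall = nr;
 *	do_syscall_trace_enter(regs);
 *	res = -ENOSYS;
 *	if (nr < __NR_syscall_count && sys_call_table[nr] != sys_ni_syscall)
 *		res = ((syscall_fn_t)sys_call_table[nr])(regs->areg[6],
 *			regs->areg[3], regs->areg[4], regs->areg[5],
 *			regs->areg[8], regs->areg[9]);
 *	regs->areg[2] = res;
 *	do_syscall_trace_leave(regs);
 */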
1854
1855ENTRY(system_call)
Chris Zankeld1538c42012-11-16 16:16:20 -08001856
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001857 entry a1, 32
1858
1859 /* regs->syscall = regs->areg[2] */
1860
1861 l32i a3, a2, PT_AREG2
1862 mov a6, a2
1863 movi a4, do_syscall_trace_enter
1864 s32i a3, a2, PT_SYSCALL
1865 callx4 a4
1866
1867 /* syscall = sys_call_table[syscall_nr] */
1868
1869 movi a4, sys_call_table;
1870 movi a5, __NR_syscall_count
1871 movi a6, -ENOSYS
1872 bgeu a3, a5, 1f
1873
1874 addx4 a4, a3, a4
1875 l32i a4, a4, 0
1876 movi a5, sys_ni_syscall;
1877 beq a4, a5, 1f
1878
1879 /* Load args: arg0 - arg5 are passed via regs. */
1880
1881 l32i a6, a2, PT_AREG6
1882 l32i a7, a2, PT_AREG3
1883 l32i a8, a2, PT_AREG4
1884 l32i a9, a2, PT_AREG5
1885 l32i a10, a2, PT_AREG8
1886 l32i a11, a2, PT_AREG9
1887
1888 /* Pass one additional argument to the syscall: pt_regs (on stack) */
1889 s32i a2, a1, 0
1890
1891 callx4 a4
1892
18931: /* regs->areg[2] = return_value */
1894
1895 s32i a6, a2, PT_AREG2
1896 movi a4, do_syscall_trace_leave
1897 mov a6, a2
1898 callx4 a4
1899 retw
1900
Chris Zankeld1538c42012-11-16 16:16:20 -08001901ENDPROC(system_call)
1902
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001903
1904/*
Chris Zankel5a0015d2005-06-23 22:01:16 -07001905 * Task switch.
1906 *
1907 * struct task* _switch_to (struct task* prev, struct task* next)
1908 * a2 a2 a3
1909 */
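/* The sequence below, as C-like pseudocode (reference only; window spilling
 * and the CPENABLE swap have no C equivalent, and the helper names are
 * descriptive, not real functions):
 *
 *	save_xtregs_user(prev_ti);		// extra user-visible TIE state
 *	prev->thread.ra = return_address;
 *	prev->thread.sp = stack_pointer;
 *	old_ps = disable_interrupts();		// PS.EXCM | LOCKLEVEL
 *	prev_ti->cpenable = set_cpenable(next_ti->cpenable);
 *	_spill_registers();			// flush all live windows
 *	exc_table[EXC_TABLE_KSTK] = next_ti + PT_REGS_OFFSET;
 *	return_address = next->thread.ra;
 *	stack_pointer  = next->thread.sp;
 *	load_xtregs_user(next_ti);
 *	restore_ps(old_ps);
 *	return prev;
 */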
1910
1911ENTRY(_switch_to)
1912
1913 entry a1, 16
1914
Chris Zankelc658eac2008-02-12 13:17:07 -08001915 mov a12, a2 # preserve 'prev' (a2)
1916 mov a13, a3 # and 'next' (a3)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001917
Chris Zankelc658eac2008-02-12 13:17:07 -08001918 l32i a4, a2, TASK_THREAD_INFO
1919 l32i a5, a3, TASK_THREAD_INFO
Chris Zankel5a0015d2005-06-23 22:01:16 -07001920
Chris Zankelc658eac2008-02-12 13:17:07 -08001921 save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
Chris Zankel5a0015d2005-06-23 22:01:16 -07001922
Chris Zankelc658eac2008-02-12 13:17:07 -08001923 s32i a0, a12, THREAD_RA # save return address
1924 s32i a1, a12, THREAD_SP # save stack pointer
1925
1926 /* Disable ints while we manipulate the stack pointer. */
1927
1928 movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
Max Filippovbc5378f2012-10-15 03:55:38 +04001929 xsr a14, ps
1930 rsr a3, excsave1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001931 rsync
1932 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
1933
Chris Zankelc658eac2008-02-12 13:17:07 -08001934 /* Switch CPENABLE */
1935
1936#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
1937 l32i a3, a5, THREAD_CPENABLE
Max Filippovbc5378f2012-10-15 03:55:38 +04001938 xsr a3, cpenable
Chris Zankelc658eac2008-02-12 13:17:07 -08001939 s32i a3, a4, THREAD_CPENABLE
1940#endif
1941
1942 /* Flush register file. */
1943
1944 call0 _spill_registers # destroys a3, a4, and SAR
Chris Zankel5a0015d2005-06-23 22:01:16 -07001945
1946 /* Set kernel stack (and leave critical section)
 1947	 * Note: It's safe to set it here. The stack will not be overwritten
1948 * because the kernel stack will only be loaded again after
1949 * we return from kernel space.
1950 */
1951
Max Filippovbc5378f2012-10-15 03:55:38 +04001952 rsr a3, excsave1 # exc_table
Chris Zankelc658eac2008-02-12 13:17:07 -08001953 movi a6, 0
1954 addi a7, a5, PT_REGS_OFFSET
1955 s32i a6, a3, EXC_TABLE_FIXUP
1956 s32i a7, a3, EXC_TABLE_KSTK
Chris Zankel5a0015d2005-06-23 22:01:16 -07001957
1958 /* restore context of the task that 'next' addresses */
1959
Chris Zankelc658eac2008-02-12 13:17:07 -08001960 l32i a0, a13, THREAD_RA # restore return address
1961 l32i a1, a13, THREAD_SP # restore stack pointer
Chris Zankel5a0015d2005-06-23 22:01:16 -07001962
Chris Zankelc658eac2008-02-12 13:17:07 -08001963 load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
1964
Max Filippovbc5378f2012-10-15 03:55:38 +04001965 wsr a14, ps
Chris Zankelc658eac2008-02-12 13:17:07 -08001966 mov a2, a12 # return 'prev'
Chris Zankel5a0015d2005-06-23 22:01:16 -07001967 rsync
1968
1969 retw
1970
Chris Zankeld1538c42012-11-16 16:16:20 -08001971ENDPROC(_switch_to)
Chris Zankel5a0015d2005-06-23 22:01:16 -07001972
1973ENTRY(ret_from_fork)
1974
1975 /* void schedule_tail (struct task_struct *prev)
1976 * Note: prev is still in a6 (return value from fake call4 frame)
1977 */
1978 movi a4, schedule_tail
1979 callx4 a4
1980
Chris Zankelfc4fb2a2006-12-10 02:18:52 -08001981 movi a4, do_syscall_trace_leave
1982 mov a6, a1
Chris Zankel5a0015d2005-06-23 22:01:16 -07001983 callx4 a4
1984
1985 j common_exception_return
1986
Chris Zankeld1538c42012-11-16 16:16:20 -08001987ENDPROC(ret_from_fork)
1988
Max Filippov3306a722012-10-25 11:10:50 +04001989/*
1990 * Kernel thread creation helper
1991 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
1992 * left from _switch_to: a6 = prev
1993 */
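/* In effect (reference sketch):
 *
 *	schedule_tail(prev);		// prev arrives in a6 from _switch_to
 *	thread_fn(arg);			// a2/a3 as set up by copy_thread
 *	goto common_exception_return;	// if thread_fn ever returns
 */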
1994ENTRY(ret_from_kernel_thread)
1995
1996 call4 schedule_tail
1997 mov a6, a3
1998 callx4 a2
Max Filippovf0a1bf02012-10-25 11:10:51 +04001999 j common_exception_return
Max Filippov3306a722012-10-25 11:10:50 +04002000
2001ENDPROC(ret_from_kernel_thread)