/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>

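/*
 * MSR access helpers: when the CPU is built with the msrset/msrclr
 * instructions (CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR) the IE and BIP
 * bits are flipped with a single instruction; otherwise fall back to a
 * read-modify-write of rmsr through r11.
 */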
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
        .macro disable_irq
        msrclr r0, MSR_IE
        .endm

        .macro enable_irq
        msrset r0, MSR_IE
        .endm

        .macro clear_bip
        msrclr r0, MSR_BIP
        .endm
#else
        .macro disable_irq
        mfs r11, rmsr
        andi r11, r11, ~MSR_IE
        mts rmsr, r11
        .endm

        .macro enable_irq
        mfs r11, rmsr
        ori r11, r11, MSR_IE
        mts rmsr, r11
        .endm

        .macro clear_bip
        mfs r11, rmsr
        andi r11, r11, ~MSR_BIP
        mts rmsr, r11
        .endm
#endif

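/*
 * _interrupt: hardware interrupt entry point.
 * Switch to the kernel stack if we came from user mode, save the whole
 * register set and the special purpose registers into pt_regs, mark
 * kernel mode in PER_CPU(KM) and call do_IRQ with the saved pt_regs as
 * its argument (r5).  r15 is preloaded with ret_from_intr - 8 so that
 * do_IRQ's normal "rtsd r15, 8" return lands on ret_from_intr.
 */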
ENTRY(_interrupt)
        swi r1, r0, PER_CPU(ENTRY_SP)   /* save the current sp */
        swi r11, r0, PER_CPU(R11_SAVE)  /* temporarily save r11 */
        lwi r11, r0, PER_CPU(KM)        /* load mode indicator */
        beqid r11, 1f
        nop
        brid 2f                         /* jump over */
        addik r1, r1, (-PT_SIZE)        /* room for pt_regs (delay slot) */
1:      /* switch to kernel stack */
        lwi r1, r0, PER_CPU(CURRENT_SAVE)       /* get the saved current */
        lwi r1, r1, TS_THREAD_INFO      /* get the thread info */
        /* calculate kernel stack pointer */
        addik r1, r1, THREAD_SIZE - PT_SIZE
2:
        swi r11, r1, PT_MODE            /* store the mode */
        lwi r11, r0, PER_CPU(R11_SAVE)  /* reload r11 */
        swi r2, r1, PT_R2
        swi r3, r1, PT_R3
        swi r4, r1, PT_R4
        swi r5, r1, PT_R5
        swi r6, r1, PT_R6
        swi r7, r1, PT_R7
        swi r8, r1, PT_R8
        swi r9, r1, PT_R9
        swi r10, r1, PT_R10
        swi r11, r1, PT_R11
        swi r12, r1, PT_R12
        swi r13, r1, PT_R13
        swi r14, r1, PT_R14
        swi r14, r1, PT_PC      /* r14 holds the interrupt return address */
        swi r15, r1, PT_R15
        swi r16, r1, PT_R16
        swi r17, r1, PT_R17
        swi r18, r1, PT_R18
        swi r19, r1, PT_R19
        swi r20, r1, PT_R20
        swi r21, r1, PT_R21
        swi r22, r1, PT_R22
        swi r23, r1, PT_R23
        swi r24, r1, PT_R24
        swi r25, r1, PT_R25
        swi r26, r1, PT_R26
        swi r27, r1, PT_R27
        swi r28, r1, PT_R28
        swi r29, r1, PT_R29
        swi r30, r1, PT_R30
        swi r31, r1, PT_R31
        /* special purpose registers */
        mfs r11, rmsr
        swi r11, r1, PT_MSR
        mfs r11, rear
        swi r11, r1, PT_EAR
        mfs r11, resr
        swi r11, r1, PT_ESR
        mfs r11, rfsr
        swi r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi r11, r0, PER_CPU(ENTRY_SP)
        swi r11, r1, PT_R1
        /* update mode indicator: we are in kernel mode */
        addik r11, r0, 1
        swi r11, r0, PER_CPU(KM)
        /* restore r31 */
        lwi r31, r0, PER_CPU(CURRENT_SAVE)
        /* prepare the link register, the argument and jump */
        addik r15, r0, ret_from_intr - 8
        addk r6, r0, r15
        braid do_IRQ
        add r5, r0, r1

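/*
 * ret_from_intr: return path from do_IRQ.
 * If we interrupted the kernel (PT_MODE non-zero), skip straight to the
 * state restore; otherwise reschedule and deliver signals/resume
 * notifications as flagged in the current thread_info.
 */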
ret_from_intr:
        lwi r11, r1, PT_MODE
        bneid r11, no_intr_resched

        lwi r6, r31, TS_THREAD_INFO     /* get thread info */
        lwi r19, r6, TI_FLAGS           /* get flags in thread info */
        /* do extra work if any bits are set */

        andi r11, r19, _TIF_NEED_RESCHED
        beqi r11, 1f
        bralid r15, schedule
        nop
1:      andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        beqid r11, no_intr_resched
        addk r5, r1, r0
        bralid r15, do_notify_resume
        addk r6, r0, r0

no_intr_resched:
        /* Disable interrupts; we are now committed to the state restore */
        disable_irq

        /* save mode indicator */
        lwi r11, r1, PT_MODE
        swi r11, r0, PER_CPU(KM)

        /* save r31 */
        swi r31, r0, PER_CPU(CURRENT_SAVE)
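/*
 * restore_context: reload the special purpose registers and the whole
 * general purpose register set from pt_regs, then return to the
 * interrupted code with rtid (which also re-enables interrupts).
 */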
restore_context:
        /* special purpose registers */
        lwi r11, r1, PT_FSR
        mts rfsr, r11
        lwi r11, r1, PT_ESR
        mts resr, r11
        lwi r11, r1, PT_EAR
        mts rear, r11
        lwi r11, r1, PT_MSR
        mts rmsr, r11

        lwi r31, r1, PT_R31
        lwi r30, r1, PT_R30
        lwi r29, r1, PT_R29
        lwi r28, r1, PT_R28
        lwi r27, r1, PT_R27
        lwi r26, r1, PT_R26
        lwi r25, r1, PT_R25
        lwi r24, r1, PT_R24
        lwi r23, r1, PT_R23
        lwi r22, r1, PT_R22
        lwi r21, r1, PT_R21
        lwi r20, r1, PT_R20
        lwi r19, r1, PT_R19
        lwi r18, r1, PT_R18
        lwi r17, r1, PT_R17
        lwi r16, r1, PT_R16
        lwi r15, r1, PT_R15
        lwi r14, r1, PT_PC
        lwi r13, r1, PT_R13
        lwi r12, r1, PT_R12
        lwi r11, r1, PT_R11
        lwi r10, r1, PT_R10
        lwi r9, r1, PT_R9
        lwi r8, r1, PT_R8
        lwi r7, r1, PT_R7
        lwi r6, r1, PT_R6
        lwi r5, r1, PT_R5
        lwi r4, r1, PT_R4
        lwi r3, r1, PT_R3
        lwi r2, r1, PT_R2
        lwi r1, r1, PT_R1
        rtid r14, 0
        nop

ENTRY(_reset)
        brai 0;

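/*
 * _user_exception: system call entry, reached from user space through
 * the user exception vector (brki r14, so r14 holds the address of the
 * trapping instruction).  The syscall number arrives in r12 and the
 * arguments in r5-r10; the result is returned in r3.  Save the full
 * context, switch to kernel mode and dispatch through sys_call_table,
 * returning via ret_to_user.
 */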
ENTRY(_user_exception)
        swi r1, r0, PER_CPU(ENTRY_SP)   /* save the current sp */
        swi r11, r0, PER_CPU(R11_SAVE)  /* temporarily save r11 */
        lwi r11, r0, PER_CPU(KM)        /* load mode indicator */
        beqid r11, 1f                   /* Already in kernel mode? */
        nop
        brid 2f                         /* jump over */
        addik r1, r1, (-PT_SIZE)        /* Room for pt_regs (delay slot) */
1:      /* Switch to kernel stack */
        lwi r1, r0, PER_CPU(CURRENT_SAVE)       /* get the saved current */
        lwi r1, r1, TS_THREAD_INFO      /* get the thread info */
        /* calculate kernel stack pointer */
        addik r1, r1, THREAD_SIZE - PT_SIZE
2:
        swi r11, r1, PT_MODE            /* store the mode */
        lwi r11, r0, PER_CPU(R11_SAVE)  /* reload r11 */
        /* save them on stack */
        swi r2, r1, PT_R2
        swi r3, r1, PT_R3       /* r3: _always_ in clobber list; see unistd.h */
        swi r4, r1, PT_R4       /* r4: _always_ in clobber list; see unistd.h */
        swi r5, r1, PT_R5
        swi r6, r1, PT_R6
        swi r7, r1, PT_R7
        swi r8, r1, PT_R8
        swi r9, r1, PT_R9
        swi r10, r1, PT_R10
        swi r11, r1, PT_R11
        /* r12: _always_ in clobber list; see unistd.h */
        swi r12, r1, PT_R12
        swi r13, r1, PT_R13
        /* r14: _always_ in clobber list; see unistd.h */
        swi r14, r1, PT_R14
        /* but we want to return to the next inst. */
        addik r14, r14, 0x4
        swi r14, r1, PT_PC      /* increment by 4 and store in pc */
        swi r15, r1, PT_R15
        swi r16, r1, PT_R16
        swi r17, r1, PT_R17
        swi r18, r1, PT_R18
        swi r19, r1, PT_R19
        swi r20, r1, PT_R20
        swi r21, r1, PT_R21
        swi r22, r1, PT_R22
        swi r23, r1, PT_R23
        swi r24, r1, PT_R24
        swi r25, r1, PT_R25
        swi r26, r1, PT_R26
        swi r27, r1, PT_R27
        swi r28, r1, PT_R28
        swi r29, r1, PT_R29
        swi r30, r1, PT_R30
        swi r31, r1, PT_R31

        disable_irq
        nop             /* make sure IE bit is in effect */
        clear_bip       /* once IE is in effect it is safe to clear BIP */
        nop

        /* special purpose registers */
        mfs r11, rmsr
        swi r11, r1, PT_MSR
        mfs r11, rear
        swi r11, r1, PT_EAR
        mfs r11, resr
        swi r11, r1, PT_ESR
        mfs r11, rfsr
        swi r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi r11, r0, PER_CPU(ENTRY_SP)
        swi r11, r1, PT_R1
        /* update mode indicator: we are in kernel mode */
        addik r11, r0, 1
        swi r11, r0, PER_CPU(KM)
        /* restore r31 */
        lwi r31, r0, PER_CPU(CURRENT_SAVE)
        /* re-enable interrupts now we are in kernel mode */
        enable_irq

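/*
 * Syscall dispatch: sys_call_table holds one 32-bit pointer per call,
 * so the number is scaled by four with two additions (the barrel
 * shifter is optional).  r15 is preloaded with ret_to_user - 8, so the
 * handler's normal "rtsd r15, 8" return lands on the syscall epilogue.
 * r30 = 1 records that restarts are allowed (cleared again for
 * sys_rt_sigreturn below).
 */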
        /* See if the system call number is valid. */
        addi r11, r12, -__NR_syscalls
        bgei r11, 1f                    /* return to user if not valid */
        /* Figure out which function to use for this system call. */
        /* Note: the MicroBlaze barrel shifter is optional, so don't rely on it */
        add r12, r12, r12               /* convert num -> ptr */
        addik r30, r0, 1                /* restarts allowed */
        add r12, r12, r12
        lwi r12, r12, sys_call_table    /* Get function pointer */
        addik r15, r0, ret_to_user-8    /* set return address */
        bra r12                         /* Make the system call. */
        bri 0                           /* won't reach here */
1:
        brid ret_to_user                /* jump to syscall epilogue */
        addi r3, r0, -ENOSYS            /* set errno in delay slot */

/*
 * Debug traps are like a system call, but entered via brki r14, 0x60;
 * all we need to do is send the SIGTRAP signal to current, and ptrace
 * and do_notify_resume will handle the rest.
 */
ENTRY(_debug_exception)
        swi r1, r0, PER_CPU(ENTRY_SP)   /* save the current sp */
        lwi r1, r0, PER_CPU(CURRENT_SAVE)       /* get the saved current */
        lwi r1, r1, TS_THREAD_INFO      /* get the thread info */
        addik r1, r1, THREAD_SIZE - PT_SIZE     /* get the kernel stack */
        swi r11, r0, PER_CPU(R11_SAVE)  /* temporarily save r11 */
        lwi r11, r0, PER_CPU(KM)        /* load mode indicator */
//save_context:
        swi r11, r1, PT_MODE            /* store the mode */
        lwi r11, r0, PER_CPU(R11_SAVE)  /* reload r11 */
        /* save them on stack */
        swi r2, r1, PT_R2
        swi r3, r1, PT_R3       /* r3: _always_ in clobber list; see unistd.h */
        swi r4, r1, PT_R4       /* r4: _always_ in clobber list; see unistd.h */
        swi r5, r1, PT_R5
        swi r6, r1, PT_R6
        swi r7, r1, PT_R7
        swi r8, r1, PT_R8
        swi r9, r1, PT_R9
        swi r10, r1, PT_R10
        swi r11, r1, PT_R11
        /* r12: _always_ in clobber list; see unistd.h */
        swi r12, r1, PT_R12
        swi r13, r1, PT_R13
        /* r14: _always_ in clobber list; see unistd.h */
        swi r14, r1, PT_R14
        swi r14, r1, PT_PC      /* Will return to interrupted instruction */
        swi r15, r1, PT_R15
        swi r16, r1, PT_R16
        swi r17, r1, PT_R17
        swi r18, r1, PT_R18
        swi r19, r1, PT_R19
        swi r20, r1, PT_R20
        swi r21, r1, PT_R21
        swi r22, r1, PT_R22
        swi r23, r1, PT_R23
        swi r24, r1, PT_R24
        swi r25, r1, PT_R25
        swi r26, r1, PT_R26
        swi r27, r1, PT_R27
        swi r28, r1, PT_R28
        swi r29, r1, PT_R29
        swi r30, r1, PT_R30
        swi r31, r1, PT_R31

        disable_irq
        nop             /* make sure IE bit is in effect */
        clear_bip       /* once IE is in effect it is safe to clear BIP */
        nop

        /* special purpose registers */
        mfs r11, rmsr
        swi r11, r1, PT_MSR
        mfs r11, rear
        swi r11, r1, PT_EAR
        mfs r11, resr
        swi r11, r1, PT_ESR
        mfs r11, rfsr
        swi r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi r11, r0, PER_CPU(ENTRY_SP)
        swi r11, r1, PT_R1
        /* update mode indicator: we are in kernel mode */
        addik r11, r0, 1
        swi r11, r0, PER_CPU(KM)
        /* restore r31 */
        lwi r31, r0, PER_CPU(CURRENT_SAVE)
        /* re-enable interrupts now we are in kernel mode */
        enable_irq

        addi r5, r0, SIGTRAP            /* sending the trap signal */
        add r6, r0, r31                 /* to current */
        bralid r15, send_sig
        add r7, r0, r0                  /* 3rd param zero */

        addik r30, r0, 1                /* restarts allowed ??? */
        /* Restore r3/r4 to work around how ret_to_user works */
        lwi r3, r1, PT_R3
        lwi r4, r1, PT_R4
        bri ret_to_user

ENTRY(_break)
        bri 0

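/*
 * _switch_to: save the dedicated and non-volatile registers plus the
 * SPRs of the outgoing thread into prev->cpu_context, make the new
 * task current (r31 and PER_CPU(CURRENT_SAVE)), then restore the
 * incoming thread's cpu_context and return to its saved r15.
 */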
/* struct task_struct *_switch_to(struct thread_info *prev,
                                  struct thread_info *next); */
ENTRY(_switch_to)
        /* prepare return value */
        addk r3, r0, r31

        /* save registers in cpu_context */
        /* use r11 and r12, volatile registers, as temporary registers */
        addik r11, r5, TI_CPU_CONTEXT
        swi r1, r11, CC_R1
        swi r2, r11, CC_R2
        /* skip volatile registers;
         * they were saved on the stack when we jumped to _switch_to() */
        /* dedicated registers */
        swi r13, r11, CC_R13
        swi r14, r11, CC_R14
        swi r15, r11, CC_R15
        swi r16, r11, CC_R16
        swi r17, r11, CC_R17
        swi r18, r11, CC_R18
        /* save non-volatile registers */
        swi r19, r11, CC_R19
        swi r20, r11, CC_R20
        swi r21, r11, CC_R21
        swi r22, r11, CC_R22
        swi r23, r11, CC_R23
        swi r24, r11, CC_R24
        swi r25, r11, CC_R25
        swi r26, r11, CC_R26
        swi r27, r11, CC_R27
        swi r28, r11, CC_R28
        swi r29, r11, CC_R29
        swi r30, r11, CC_R30
        /* special purpose registers */
        mfs r12, rmsr
        swi r12, r11, CC_MSR
        mfs r12, rear
        swi r12, r11, CC_EAR
        mfs r12, resr
        swi r12, r11, CC_ESR
        mfs r12, rfsr
        swi r12, r11, CC_FSR

        /* update r31, the current */
        lwi r31, r6, TI_TASK
        swi r31, r0, PER_CPU(CURRENT_SAVE)

        /* get new process' cpu context and restore */
        addik r11, r6, TI_CPU_CONTEXT

        /* special purpose registers */
        lwi r12, r11, CC_FSR
        mts rfsr, r12
        lwi r12, r11, CC_ESR
        mts resr, r12
        lwi r12, r11, CC_EAR
        mts rear, r12
        lwi r12, r11, CC_MSR
        mts rmsr, r12
        /* non-volatile registers */
        lwi r30, r11, CC_R30
        lwi r29, r11, CC_R29
        lwi r28, r11, CC_R28
        lwi r27, r11, CC_R27
        lwi r26, r11, CC_R26
        lwi r25, r11, CC_R25
        lwi r24, r11, CC_R24
        lwi r23, r11, CC_R23
        lwi r22, r11, CC_R22
        lwi r21, r11, CC_R21
        lwi r20, r11, CC_R20
        lwi r19, r11, CC_R19
        /* dedicated registers */
        lwi r18, r11, CC_R18
        lwi r17, r11, CC_R17
        lwi r16, r11, CC_R16
        lwi r15, r11, CC_R15
        lwi r14, r11, CC_R14
        lwi r13, r11, CC_R13
        /* skip volatile registers */
        lwi r2, r11, CC_R2
        lwi r1, r11, CC_R1

        rtsd r15, 8
        nop

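/*
 * ret_from_fork: first return of a newly forked child.  r3 holds the
 * previous task (the return value of _switch_to) and is handed to
 * schedule_tail(); the child then returns 0 to user space through
 * ret_to_user.
 */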
ENTRY(ret_from_fork)
        addk r5, r0, r3
        brlid r15, schedule_tail
        nop
        swi r31, r1, PT_R31             /* save r31 in user context. */
                        /* will soon be restored to r31 in ret_to_user */
        addk r3, r0, r0
        brid ret_to_user
        nop

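/*
 * ret_from_kernel_thread: first schedule of a kernel thread.  Run
 * schedule_tail() for the previous task (r3), then call the thread
 * function held in r20 with the argument from r19 (set up when the
 * thread was created); if it ever returns, drop back to user space
 * with a zero return value via ret_to_user.
 */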
ENTRY(ret_from_kernel_thread)
        brlid r15, schedule_tail
        addk r5, r0, r3
        brald r15, r20
        addk r5, r0, r19
        brid ret_to_user
        addk r3, r0, r0

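/*
 * work_pending: out-of-line slow path for ret_to_user.  If we are
 * returning to the kernel (PT_MODE non-zero), jump straight to the
 * context restore at 2:; otherwise reschedule and/or let
 * do_notify_resume handle pending signals and resume notifications.
 */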
work_pending:
        lwi r11, r1, PT_MODE
        bneid r11, 2f
        enable_irq

        andi r11, r19, _TIF_NEED_RESCHED
        beqi r11, 1f
        bralid r15, schedule
        nop
1:      andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        beqi r11, no_work_pending
        addk r5, r30, r0
        bralid r15, do_notify_resume
        addik r6, r0, 1
        bri no_work_pending

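/*
 * ret_to_user: common syscall/exception return path.  Store the return
 * value (r3/r4) into pt_regs, branch to work_pending if any TIF work is
 * flagged, then restore the saved mode, the SPRs and the full register
 * set and return with rtid.
 */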
ENTRY(ret_to_user)
        disable_irq

        swi r4, r1, PT_R4               /* return val */
        swi r3, r1, PT_R3               /* return val */

        lwi r6, r31, TS_THREAD_INFO     /* get thread info */
        lwi r19, r6, TI_FLAGS           /* get flags in thread info */
        bnei r19, work_pending          /* do extra work if any bits are set */
no_work_pending:
        disable_irq

2:
        /* save r31 */
        swi r31, r0, PER_CPU(CURRENT_SAVE)
        /* save mode indicator */
        lwi r18, r1, PT_MODE
        swi r18, r0, PER_CPU(KM)
//restore_context:
        /* special purpose registers */
        lwi r18, r1, PT_FSR
        mts rfsr, r18
        lwi r18, r1, PT_ESR
        mts resr, r18
        lwi r18, r1, PT_EAR
        mts rear, r18
        lwi r18, r1, PT_MSR
        mts rmsr, r18

        lwi r31, r1, PT_R31
        lwi r30, r1, PT_R30
        lwi r29, r1, PT_R29
        lwi r28, r1, PT_R28
        lwi r27, r1, PT_R27
        lwi r26, r1, PT_R26
        lwi r25, r1, PT_R25
        lwi r24, r1, PT_R24
        lwi r23, r1, PT_R23
        lwi r22, r1, PT_R22
        lwi r21, r1, PT_R21
        lwi r20, r1, PT_R20
        lwi r19, r1, PT_R19
        lwi r18, r1, PT_R18
        lwi r17, r1, PT_R17
        lwi r16, r1, PT_R16
        lwi r15, r1, PT_R15
        lwi r14, r1, PT_PC
        lwi r13, r1, PT_R13
        lwi r12, r1, PT_R12
        lwi r11, r1, PT_R11
        lwi r10, r1, PT_R10
        lwi r9, r1, PT_R9
        lwi r8, r1, PT_R8
        lwi r7, r1, PT_R7
        lwi r6, r1, PT_R6
        lwi r5, r1, PT_R5
        lwi r4, r1, PT_R4               /* return val */
        lwi r3, r1, PT_R3               /* return val */
        lwi r2, r1, PT_R2
        lwi r1, r1, PT_R1

        rtid r14, 0
        nop

sys_rt_sigreturn_wrapper:
        addk r30, r0, r0                /* no restarts for this one */
        brid sys_rt_sigreturn
        addk r5, r1, r0

        /* Interrupt vector table */
        .section .init.ivt, "ax"
        .org 0x0
        brai _reset
        brai _user_exception
        brai _interrupt
        brai _break
        brai _hw_exception_handler
        .org 0x60
        brai _debug_exception

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
        .ascii "SYSCALL\0"
type_IRQ:
        .ascii "IRQ\0"
type_IRQ_PREEMPT:
        .ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
        .ascii " SYSCALL (PREEMPTED)\0"

        /*
         * Trap decoding for stack unwinder
         * Tuples are (start addr, end addr, string)
         * If the return address lies in [start addr, end addr],
         * the unwinder displays 'string'
         */

        .align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
        /* Exact matches come first */
        .word ret_to_user  ; .word ret_to_user     ; .word type_SYSCALL
        .word ret_from_intr; .word ret_from_intr   ; .word type_IRQ
        /* Fuzzy matches go here */
        .word ret_from_intr; .word no_intr_resched ; .word type_IRQ_PREEMPT
        .word work_pending ; .word no_work_pending ; .word type_SYSCALL_PREEMPT
        /* End of table */
        .word 0            ; .word 0               ; .word 0