blob: f5fe220954b0e636cbaec5d728f3bb406ad17160 [file] [log] [blame]
Michal Simekca545022009-05-26 16:30:21 +02001/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
Michal Simek11d51362009-12-07 08:21:34 +010034#undef DEBUG
35
Michal Simekca545022009-05-26 16:30:21 +020036/* The size of a state save frame. */
 37#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
 38
 39/* The offset of the struct pt_regs in a `state save frame' on the stack. */
 40#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
 41
/* Declare a globally visible, 4-byte-aligned entry label; the use site
 * appends the trailing ':' (see C_ENTRY(...) uses below). */
 42#define C_ENTRY(name) .globl name; .align 4; name
43
44/*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
 49#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
/* CPU has msrset/msrclr: single-instruction MSR bit updates.
 * Every macro here clobbers r11 (destination of msrset/msrclr). */
 50 .macro clear_bip
 51 msrclr r11, MSR_BIP
 52 nop
 53 .endm
 54
 55 .macro set_bip
 56 msrset r11, MSR_BIP
 57 nop
 58 .endm
 59
 60 .macro clear_eip
 61 msrclr r11, MSR_EIP
 62 nop
 63 .endm
 64
 65 .macro set_ee
 66 msrset r11, MSR_EE
 67 nop
 68 .endm
 69
 70 .macro disable_irq
 71 msrclr r11, MSR_IE
 72 nop
 73 .endm
 74
 75 .macro enable_irq
 76 msrset r11, MSR_IE
 77 nop
 78 .endm
 79
 80 .macro set_ums
/* NOTE(review): this variant sets UMS and clears VMS, while the no-msr
 * variant of set_ums below sets VMS and clears UMS (identical to set_vms).
 * One of the two looks inverted; confirm against the MicroBlaze UMS/VMS
 * save-bit semantics before changing either. */
 81 msrset r11, MSR_UMS
 82 nop
 83 msrclr r11, MSR_VMS
 84 nop
 85 .endm
 86
 87 .macro set_vms
 88 msrclr r11, MSR_UMS
 89 nop
 90 msrset r11, MSR_VMS
 91 nop
 92 .endm
 93
 94 .macro clear_vms_ums
Michal Simek3fbd93e2010-06-22 13:51:50 +020095 msrclr r11, MSR_VMS | MSR_UMS
Michal Simekca545022009-05-26 16:30:21 +020096 nop
 97 .endm
 98#else
/* No msrset/msrclr support: emulate via mfs/mts read-modify-write of rmsr.
 * Still clobbers r11; the mfs..mts window is not atomic w.r.t. interrupts. */
 99 .macro clear_bip
 100 mfs r11, rmsr
 101 nop
 102 andi r11, r11, ~MSR_BIP
 103 mts rmsr, r11
 104 nop
 105 .endm
 106
 107 .macro set_bip
 108 mfs r11, rmsr
 109 nop
 110 ori r11, r11, MSR_BIP
 111 mts rmsr, r11
 112 nop
 113 .endm
 114
 115 .macro clear_eip
 116 mfs r11, rmsr
 117 nop
 118 andi r11, r11, ~MSR_EIP
 119 mts rmsr, r11
 120 nop
 121 .endm
 122
 123 .macro set_ee
 124 mfs r11, rmsr
 125 nop
 126 ori r11, r11, MSR_EE
 127 mts rmsr, r11
 128 nop
 129 .endm
 130
 131 .macro disable_irq
 132 mfs r11, rmsr
 133 nop
 134 andi r11, r11, ~MSR_IE
 135 mts rmsr, r11
 136 nop
 137 .endm
 138
 139 .macro enable_irq
 140 mfs r11, rmsr
 141 nop
 142 ori r11, r11, MSR_IE
 143 mts rmsr, r11
 144 nop
 145 .endm
 146
 147 .macro set_ums
/* NOTE(review): sets VMS and clears UMS - the opposite of the msr-instr
 * set_ums above, and byte-for-byte the same body as set_vms below.
 * Looks like a copy-paste inversion; verify before touching. */
 148 mfs r11, rmsr
 149 nop
 150 ori r11, r11, MSR_VMS
 151 andni r11, r11, MSR_UMS
 152 mts rmsr, r11
 153 nop
 154 .endm
 155
 156 .macro set_vms
 157 mfs r11, rmsr
 158 nop
 159 ori r11, r11, MSR_VMS
 160 andni r11, r11, MSR_UMS
 161 mts rmsr, r11
 162 nop
 163 .endm
 164
 165 .macro clear_vms_ums
 166 mfs r11, rmsr
 167 nop
 168 andni r11, r11, (MSR_VMS|MSR_UMS)
 169 mts rmsr,r11
 170 nop
 171 .endm
 172#endif
173
174/* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
177 */
178
/* VM_ON / VM_OFF flip the MMU by programming the MSR save bits and then
 * executing rted to a local label (rted transfers the saved-mode bits into
 * effect - see the MicroBlaze ISA reference). Both clobber r11 via the
 * macros they expand. VM_OFF jumps to the physical (TOPHYS) address of its
 * continuation since translation is being switched off. */
 179/* turn on virtual protected mode save */
 180#define VM_ON \
Michal Simeka4a94db2010-06-22 13:15:53 +0200181 set_ums; \
Michal Simekca545022009-05-26 16:30:21 +0200182 rted r0, 2f; \
Michal Simeka4a94db2010-06-22 13:15:53 +0200183 nop; \
 1842:
Michal Simekca545022009-05-26 16:30:21 +0200185
 186/* turn off virtual protected mode save and user mode save*/
 187#define VM_OFF \
Michal Simeka4a94db2010-06-22 13:15:53 +0200188 clear_vms_ums; \
Michal Simekca545022009-05-26 16:30:21 +0200189 rted r0, TOPHYS(1f); \
Michal Simeka4a94db2010-06-22 13:15:53 +0200190 nop; \
 1911:
Michal Simekca545022009-05-26 16:30:21 +0200192
/* Store r2-r15 and r18-r31 plus MSR into the pt_regs frame at r1+PTO.
 * Not stored here: r0 (always zero), r1 (frame pointer itself, saved by the
 * callers), and r16/r17 (handled separately by the exception paths - see
 * SAVE_STATE, which stores r17 into PT_PC). r14 is stored as PT_PC (the
 * pre-trap PC). r11's original value is saved first, then r11 is clobbered
 * to read the MSR. */
 193#define SAVE_REGS \
 194 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
Michal Simek36f60952010-06-22 13:27:43 +0200195 swi r3, r1, PTO+PT_R3; \
 196 swi r4, r1, PTO+PT_R4; \
Michal Simekca545022009-05-26 16:30:21 +0200197 swi r5, r1, PTO+PT_R5; \
 198 swi r6, r1, PTO+PT_R6; \
 199 swi r7, r1, PTO+PT_R7; \
 200 swi r8, r1, PTO+PT_R8; \
 201 swi r9, r1, PTO+PT_R9; \
 202 swi r10, r1, PTO+PT_R10; \
 203 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
 204 swi r12, r1, PTO+PT_R12; \
 205 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
 206 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
 207 swi r15, r1, PTO+PT_R15; /* Save LP */ \
 208 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
 209 swi r19, r1, PTO+PT_R19; \
 210 swi r20, r1, PTO+PT_R20; \
 211 swi r21, r1, PTO+PT_R21; \
 212 swi r22, r1, PTO+PT_R22; \
 213 swi r23, r1, PTO+PT_R23; \
 214 swi r24, r1, PTO+PT_R24; \
 215 swi r25, r1, PTO+PT_R25; \
 216 swi r26, r1, PTO+PT_R26; \
 217 swi r27, r1, PTO+PT_R27; \
 218 swi r28, r1, PTO+PT_R28; \
 219 swi r29, r1, PTO+PT_R29; \
 220 swi r30, r1, PTO+PT_R30; \
 221 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
 222 mfs r11, rmsr; /* save MSR */ \
 223 nop; \
 224 swi r11, r1, PTO+PT_MSR;
225
/* Inverse of SAVE_REGS: write MSR back first, then reload r2-r15 and
 * r18-r31 from the pt_regs frame at r1+PTO. r1 is left pointing at the
 * frame; callers pop it afterwards (addik r1, r1, STATE_SAVE_SIZE). */
 226#define RESTORE_REGS \
 227 lwi r11, r1, PTO+PT_MSR; \
 228 mts rmsr , r11; \
 229 nop; \
 230 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
Michal Simek36f60952010-06-22 13:27:43 +0200231 lwi r3, r1, PTO+PT_R3; \
 232 lwi r4, r1, PTO+PT_R4; \
Michal Simekca545022009-05-26 16:30:21 +0200233 lwi r5, r1, PTO+PT_R5; \
 234 lwi r6, r1, PTO+PT_R6; \
 235 lwi r7, r1, PTO+PT_R7; \
 236 lwi r8, r1, PTO+PT_R8; \
 237 lwi r9, r1, PTO+PT_R9; \
 238 lwi r10, r1, PTO+PT_R10; \
 239 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
 240 lwi r12, r1, PTO+PT_R12; \
 241 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
 242 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
 243 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
 244 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
 245 lwi r19, r1, PTO+PT_R19; \
 246 lwi r20, r1, PTO+PT_R20; \
 247 lwi r21, r1, PTO+PT_R21; \
 248 lwi r22, r1, PTO+PT_R22; \
 249 lwi r23, r1, PTO+PT_R23; \
 250 lwi r24, r1, PTO+PT_R24; \
 251 lwi r25, r1, PTO+PT_R25; \
 252 lwi r26, r1, PTO+PT_R26; \
 253 lwi r27, r1, PTO+PT_R27; \
 254 lwi r28, r1, PTO+PT_R28; \
 255 lwi r29, r1, PTO+PT_R29; \
 256 lwi r30, r1, PTO+PT_R30; \
 257 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
258
259.text
260
261/*
262 * User trap.
263 *
264 * System calls are handled here.
265 *
266 * Syscall protocol:
267 * Syscall number in r12, args in r5-r10
268 * Return value in r3
269 *
270 * Trap entered via brki instruction, so BIP bit is set, and interrupts
271 * are masked. This is nice, means we don't have to CLI before state save
272 */
/* Syscall trap entry (brki). On entry: r12 = syscall number, r5-r10 = args,
 * r14 = trap return address (adjusted +4 below), BIP set so interrupts are
 * masked. Runs in physical mode until the set_vms/rtid below. */
 273C_ENTRY(_user_exception):
 274 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
 275 addi r14, r14, 4 /* return address is 4 byte after call */
 276 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
 277
 278 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
 279 beqi r11, 1f; /* Jump ahead if coming from user */
 280/* Kernel-mode state save. */
 281 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
 282 tophys(r1,r11);
 283 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
 284 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
 285
 286 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 287 SAVE_REGS
 288
 289 addi r11, r0, 1; /* Was in kernel-mode. */
 290 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
 291 brid 2f;
 292 nop; /* Fill delay slot */
 293
 294/* User-mode state save. */
 2951:
 296 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
 297 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
 298 tophys(r1,r1);
 299 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
 300/* calculate kernel stack pointer from task struct 8k */
 301 addik r1, r1, THREAD_SIZE;
 302 tophys(r1,r1);
 303
 304 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 305 SAVE_REGS
 306
 307 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
 308 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 309 swi r11, r1, PTO+PT_R1; /* Store user SP. */
 310 addi r11, r0, 1;
 311 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
Michal Simekb1d70c62010-01-22 10:24:06 +01003122: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
Michal Simekca545022009-05-26 16:30:21 +0200313 /* Save away the syscall number. */
 314 swi r12, r1, PTO+PT_R0;
 315 tovirt(r1,r1)
 316
Michal Simekca545022009-05-26 16:30:21 +0200317/* where the trap should return need -8 to adjust for rtsd r15, 8*/
 318/* Jump to the appropriate function for the system call number in r12
 319 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 320 * register should point to the location where
 321 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
Michal Simek23575482009-08-24 13:26:04 +0200322
 323 # Step into virtual mode.
 324 set_vms;
 325 addik r11, r0, 3f
 326 rtid r11, 0
 327 nop
 3283:
Michal Simekb1d70c62010-01-22 10:24:06 +0100329 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
Michal Simek23575482009-08-24 13:26:04 +0200330 lwi r11, r11, TI_FLAGS /* get flags in thread info */
 331 andi r11, r11, _TIF_WORK_SYSCALL_MASK
 332 beqi r11, 4f
 333
 # Tracing active: report entry, then reload possibly-rewritten args.
 334 addik r3, r0, -ENOSYS
 335 swi r3, r1, PTO + PT_R3
 336 brlid r15, do_syscall_trace_enter
 337 addik r5, r1, PTO + PT_R0
 338
 339 # do_syscall_trace_enter returns the new syscall nr.
 340 addk r12, r0, r3
 341 lwi r5, r1, PTO+PT_R5;
 342 lwi r6, r1, PTO+PT_R6;
 343 lwi r7, r1, PTO+PT_R7;
 344 lwi r8, r1, PTO+PT_R8;
 345 lwi r9, r1, PTO+PT_R9;
 346 lwi r10, r1, PTO+PT_R10;
 3474:
 348/* Jump to the appropriate function for the system call number in r12
 349 * (r12 is not preserved), or return an error if r12 is not valid.
 350 * The LP register should point to the location where the called function
 351 * should return. [note that MAKE_SYS_CALL uses label 1] */
 352 /* See if the system call number is valid */
Michal Simekca545022009-05-26 16:30:21 +0200353 addi r11, r12, -__NR_syscalls;
Michal Simek23575482009-08-24 13:26:04 +0200354 bgei r11,5f;
Michal Simekca545022009-05-26 16:30:21 +0200355 /* Figure out which function to use for this system call. */
 356 /* Note Microblaze barrel shift is optional, so don't rely on it */
 357 add r12, r12, r12; /* convert num -> ptr */
 358 add r12, r12, r12;
 359
Michal Simek11d51362009-12-07 08:21:34 +0100360#ifdef DEBUG
Michal Simekca545022009-05-26 16:30:21 +0200361 /* Trace syscalls: keep per-syscall hit counters in r0_ram */
Michal Simek23575482009-08-24 13:26:04 +0200362 lwi r3, r12, 0x400 + r0_ram
Michal Simekca545022009-05-26 16:30:21 +0200363 addi r3, r3, 1
Michal Simek23575482009-08-24 13:26:04 +0200364 swi r3, r12, 0x400 + r0_ram
Michal Simek11d51362009-12-07 08:21:34 +0100365#endif
Michal Simekca545022009-05-26 16:30:21 +0200366
Michal Simek23575482009-08-24 13:26:04 +0200367 # Find and jump into the syscall handler.
 368 lwi r12, r12, sys_call_table
 369 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 370 la r15, r0, ret_from_trap-8
 371 bra r12
 372
Michal Simekca545022009-05-26 16:30:21 +0200373 /* The syscall number is invalid, return an error. */
Michal Simek23575482009-08-24 13:26:04 +02003745:
Michal Simekca545022009-05-26 16:30:21 +0200375 addi r3, r0, -ENOSYS;
 376 rtsd r15,8; /* looks like a normal subroutine return */
 377 or r0, r0, r0
378
379
Michal Simek23575482009-08-24 13:26:04 +0200380/* Entry point used to return from a syscall/trap */
Michal Simekca545022009-05-26 16:30:21 +0200381/* We re-enable BIP bit before state restore */
/* r3/r4 hold the syscall return value on entry; they are stored into the
 * pt_regs frame so trace/signal code can observe (and rewrite) them, and
 * are reloaded by RESTORE_REGS on the way out. */
 382C_ENTRY(ret_from_trap):
 383 set_bip; /* Ints masked for state restore*/
Michal Simekb1d70c62010-01-22 10:24:06 +0100384 swi r3, r1, PTO + PT_R3
 385 swi r4, r1, PTO + PT_R4
 386
Michal Simek36f60952010-06-22 13:27:43 +0200387 lwi r11, r1, PTO+PT_MODE;
 388/* See if returning to kernel mode, if so, skip resched &c. */
 389 bnei r11, 2f;
Michal Simekca545022009-05-26 16:30:21 +0200390 /* We're returning to user mode, so check for various conditions that
 391 * trigger rescheduling. */
Michal Simekb1d70c62010-01-22 10:24:06 +0100392 /* FIXME: Restructure all these flag checks. */
 393 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
Michal Simek23575482009-08-24 13:26:04 +0200394 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 395 andi r11, r11, _TIF_WORK_SYSCALL_MASK
 396 beqi r11, 1f
 397
Michal Simek23575482009-08-24 13:26:04 +0200398 brlid r15, do_syscall_trace_leave
 399 addik r5, r1, PTO + PT_R0
Michal Simek23575482009-08-24 13:26:04 +02004001:
Michal Simek23575482009-08-24 13:26:04 +0200401 /* We're returning to user mode, so check for various conditions that
 402 * trigger rescheduling. */
Michal Simekb1d70c62010-01-22 10:24:06 +0100403 /* get thread info from current task */
 404 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
Michal Simekca545022009-05-26 16:30:21 +0200405 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 406 andi r11, r11, _TIF_NEED_RESCHED;
 407 beqi r11, 5f;
 408
Michal Simekca545022009-05-26 16:30:21 +0200409 bralid r15, schedule; /* Call scheduler */
 410 nop; /* delay slot */
Michal Simekca545022009-05-26 16:30:21 +0200411
 412 /* Maybe handle a signal */
Michal Simekb1d70c62010-01-22 10:24:06 +01004135: /* get thread info from current task*/
 414 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
Michal Simekca545022009-05-26 16:30:21 +0200415 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 416 andi r11, r11, _TIF_SIGPENDING;
 417 beqi r11, 1f; /* No signal pending - go straight to user restore */
 418
Michal Simekca545022009-05-26 16:30:21 +0200419 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
Michal Simekca545022009-05-26 16:30:21 +0200420 addi r7, r0, 1; /* Arg 3: int in_syscall */
 421 bralid r15, do_signal; /* Handle any signals */
Michal Simek841d6e82010-01-22 14:28:36 +0100422 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
Michal Simekb1d70c62010-01-22 10:24:06 +0100423
 424/* Finally, return to user state. */
 4251:
Michal Simekb1d70c62010-01-22 10:24:06 +0100426 swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
Michal Simek8633beb2010-02-22 13:24:43 +0100427 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
Michal Simekca545022009-05-26 16:30:21 +0200428 VM_OFF;
 429 tophys(r1,r1);
 430 RESTORE_REGS;
 431 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
 432 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
 433 bri 6f;
 434
 435/* Return to kernel state. */
 4362: VM_OFF;
 437 tophys(r1,r1);
 438 RESTORE_REGS;
 439 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
 440 tovirt(r1,r1);
 4416:
 442TRAP_return: /* Make global symbol for debugging */
 443 rtbd r14, 0; /* Instructions to return from an IRQ */
 444 nop;
445
446
447/* These syscalls need access to the struct pt_regs on the stack, so we
448 implement them in assembly (they're basically all wrappers anyway). */
449
/* sys_fork wrapper: fork() == clone(SIGCHLD, parent_sp) on MicroBlaze.
 * Builds the do_fork() argument list from the trap frame and tail-calls it:
 *   r5 = clone flags (SIGCHLD), r6 = child stack (parent's saved SP),
 *   r7 = parent pt_regs, r8-r10 = unused (zeroed).
 * Fix: the "Arg 3" line used a period as operand separator
 * ("add r8. r0, r0"), which is not valid assembler syntax - it must be
 * "add r8, r0, r0". */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
459
 460/* This is the initial entry point for a new child thread, with an appropriate
 461 stack in place that makes it look like the child is in the middle of a
 462 syscall. This function is actually `returned to' from switch_thread
 463 (copy_thread makes ret_from_fork the return address in each new thread's
 464 saved context). */
 465C_ENTRY(ret_from_fork):
 466 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
 467 add r3, r5, r0; /* switch_thread returns the prev task */
 468 /* ( in the delay slot ) */
 469 add r3, r0, r0; /* Child's fork call should return 0. */
 470 brid ret_from_trap; /* Do normal trap return */
 471 nop;
472
/* vfork(): tail-call into C microblaze_vfork(regs); the pt_regs pointer
 * argument (r5) is loaded in the branch delay slot. */
Arnd Bergmanne5135882009-06-18 19:55:30 +0200473C_ENTRY(sys_vfork):
 474 brid microblaze_vfork /* Do real work (tail-call) */
Michal Simekca545022009-05-26 16:30:21 +0200475 la r5, r1, PTO /* delay slot: Arg 0 = struct pt_regs * */
Michal Simekca545022009-05-26 16:30:21 +0200476
/* clone(): default a NULL child-SP argument to the parent's saved SP,
 * shift the tid/stack_size args up one slot to make room for the pt_regs
 * pointer, then tail-call do_fork. */
Arnd Bergmanne5135882009-06-18 19:55:30 +0200477C_ENTRY(sys_clone):
Michal Simekca545022009-05-26 16:30:21 +0200478 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
Michal Simek570e3e22010-06-04 13:06:27 +0200479 lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
 4801: add r10, r0, r9; /* Arg 6: (child_tidptr) */
 481 add r9, r0, r8; /* Arg 5: (parent_tidptr) */
 482 add r8, r0, r7; /* Arg 4: (stack_size) */
 483 la r7, r1, PTO; /* Arg 3: pt_regs */
 484 brid do_fork /* Do real work (tail-call) */
 485 nop
Michal Simekca545022009-05-26 16:30:21 +0200486
/* execve(): pass the user context (pt_regs) as the 4th argument and
 * tail-call the C implementation. */
Arnd Bergmanne5135882009-06-18 19:55:30 +0200487C_ENTRY(sys_execve):
Michal Simekca545022009-05-26 16:30:21 +0200488 la r8, r1, PTO; /* add user context as 4th arg */
Arnd Bergmanne5135882009-06-18 19:55:30 +0200489 brid microblaze_execve; /* Do real work (tail-call).*/
Michal Simekca545022009-05-26 16:30:21 +0200490 nop;
491
/* rt_sigreturn needs the live r3/r4 preserved across the C call: save them
 * into the pt_regs frame, call sys_rt_sigreturn(regs), reload them, and
 * leave through ret_from_trap. */
Michal Simekca545022009-05-26 16:30:21 +0200492C_ENTRY(sys_rt_sigreturn_wrapper):
 493 swi r3, r1, PTO+PT_R3; /* save r3, r4 registers into pt_regs */
 494 swi r4, r1, PTO+PT_R4;
 495 la r5, r1, PTO; /* add user context as 1st arg */
 496 brlid r15, sys_rt_sigreturn /* Do real work */
 497 nop;
 498 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
 499 lwi r4, r1, PTO+PT_R4;
 500 bri ret_from_trap /* fall through will not work here due to align */
 501 nop;
502
503/*
 504 * HW EXCEPTION routine start
505 */
506
/* Common HW-exception entry: normalize MSR state (BIP set, EIP cleared,
 * IRQ+EE enabled), pick the right kernel stack depending on whether the
 * exception came from kernel or user mode, build a pt_regs frame, and load
 * CURRENT_TASK. Expects r17 = PC of the faulting instruction (stored into
 * PT_PC instead of r14). Clobbers r11, but its original value is parked in
 * the r0_ram scratch frame first so SAVE_REGS stores the pre-trap value. */
 507#define SAVE_STATE \
 508 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
 509 set_bip; /*equalize initial state for all possible entries*/\
 510 clear_eip; \
 511 enable_irq; \
 512 set_ee; \
 513 /* See if already in kernel mode.*/ \
 514 lwi r11, r0, TOPHYS(PER_CPU(KM)); \
 515 beqi r11, 1f; /* Jump ahead if coming from user */\
 516 /* Kernel-mode state save. */ \
 517 /* Reload kernel stack-ptr. */ \
 518 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
 519 tophys(r1,r11); \
 520 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
 521 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
 522 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
Michal Simekca545022009-05-26 16:30:21 +0200523 SAVE_REGS \
 524 /* PC, before IRQ/trap - this is one instruction above */ \
 525 swi r17, r1, PTO+PT_PC; \
 526 \
 527 addi r11, r0, 1; /* Was in kernel-mode. */ \
 528 swi r11, r1, PTO+PT_MODE; \
 529 brid 2f; \
 530 nop; /* Fill delay slot */ \
 5311: /* User-mode state save. */ \
 532 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
 533 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
 534 tophys(r1,r1); \
 535 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
 536 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
 537 tophys(r1,r1); \
 538 \
 539 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
Michal Simekca545022009-05-26 16:30:21 +0200540 SAVE_REGS \
 541 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
 542 swi r17, r1, PTO+PT_PC; \
 543 \
 544 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
 545 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
 546 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
 547 addi r11, r0, 1; \
 548 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
Michal Simekb1d70c62010-01-22 10:24:06 +01005492: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
Michal Simekca545022009-05-26 16:30:21 +0200550 /* Save away the syscall number. */ \
 551 swi r0, r1, PTO+PT_R0; \
 552 tovirt(r1,r1)
553
/* Generic HW exception: save state and dispatch to the C handler
 * full_exception(regs, ESR, FSR), entering virtual mode via rtbd. */
 554C_ENTRY(full_exception_trap):
 555 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
 556 /* adjust exception address for privileged instruction
 557 * for finding where is it */
 558 addik r17, r17, -4
 559 SAVE_STATE /* Save registers */
 560 /* FIXME this can be store directly in PT_ESR reg.
 561 * I tested it but there is a fault */
 562 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 563 la r15, r0, ret_from_exc - 8
 564 la r5, r1, PTO /* parameter struct pt_regs * regs */
 565 mfs r6, resr
 566 nop
 567 mfs r7, rfsr; /* save FSR */
 568 nop
Michal Simek131e4e92009-09-28 08:50:53 +0200569 mts rfsr, r0; /* Clear sticky fsr */
 570 nop
Michal Simekca545022009-05-26 16:30:21 +0200571 la r12, r0, full_exception
 572 set_vms;
 573 rtbd r12, 0;
 574 nop;
575
576/*
577 * Unaligned data trap.
578 *
579 * Unaligned data trap last on 4k page is handled here.
580 *
581 * Trap entered via exception, so EE bit is set, and interrupts
582 * are masked. This is nice, means we don't have to CLI before state save
583 *
584 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
585 */
/* Unaligned access: save state and dispatch to the asm fixup handler
 * _unaligned_data_exception with ESR in r3, EAR in r4 and regs in r7
 * (handler lives in hw_exception_handler.S - see header comment above). */
 586C_ENTRY(unaligned_data_trap):
 587 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
 588 SAVE_STATE /* Save registers.*/
 589 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 590 la r15, r0, ret_from_exc-8
 591 mfs r3, resr /* ESR */
 592 nop
 593 mfs r4, rear /* EAR */
 594 nop
 595 la r7, r1, PTO /* parameter struct pt_regs * regs */
 596 la r12, r0, _unaligned_data_exception
 597 set_vms;
 598 rtbd r12, 0; /* interrupts enabled */
 599 nop;
600
601/*
602 * Page fault traps.
603 *
604 * If the real exception handler (from hw_exception_handler.S) didn't find
605 * the mapping for the process, then we're thrown here to handle such situation.
606 *
607 * Trap entered via exceptions, so EE bit is set, and interrupts
608 * are masked. This is nice, means we don't have to CLI before state save
609 *
610 * Build a standard exception frame for TLB Access errors. All TLB exceptions
611 * will bail out to this point if they can't resolve the lightweight TLB fault.
612 *
613 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
614 * void do_page_fault(struct pt_regs *regs,
615 * unsigned long address,
616 * unsigned long error_code)
617 */
 618/* data or instruction trap - which one it is gets resolved in fault.c */
/* Data-side TLB miss/fault fallback: do_page_fault(regs, EAR, ESR). */
 619C_ENTRY(page_fault_data_trap):
 620 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
 621 SAVE_STATE /* Save registers.*/
 622 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 623 la r15, r0, ret_from_exc-8
 624 la r5, r1, PTO /* parameter struct pt_regs * regs */
 625 mfs r6, rear /* parameter unsigned long address */
 626 nop
 627 mfs r7, resr /* parameter unsigned long error_code */
 628 nop
 629 la r12, r0, do_page_fault
 630 set_vms;
 631 rtbd r12, 0; /* interrupts enabled */
 632 nop;
633
/* Instruction-side TLB miss/fault fallback: do_page_fault(regs, EAR, 0).
 * Unlike the data-side variant above, error_code is hard-wired to 0 here. */
 634C_ENTRY(page_fault_instr_trap):
 635 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
 636 SAVE_STATE /* Save registers.*/
 637 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 638 la r15, r0, ret_from_exc-8
 639 la r5, r1, PTO /* parameter struct pt_regs * regs */
 640 mfs r6, rear /* parameter unsigned long address */
 641 nop
 642 ori r7, r0, 0 /* parameter unsigned long error_code */
 643 la r12, r0, do_page_fault
 644 set_vms;
 645 rtbd r12, 0; /* interrupts enabled */
 646 nop;
647
648/* Entry point used to return from an exception. */
/* Return path shared by all HW exception traps: reschedule/signal checks
 * when going back to user mode, then state restore (mirrors ret_from_trap,
 * but returns with in_syscall = 0 and exits through EXC_return/rtbd). */
 649C_ENTRY(ret_from_exc):
 650 set_bip; /* Ints masked for state restore*/
 651 lwi r11, r1, PTO+PT_MODE;
 652 bnei r11, 2f; /* See if returning to kernel mode, */
 653 /* ... if so, skip resched &c. */
 654
 655 /* We're returning to user mode, so check for various conditions that
 656 trigger rescheduling. */
Michal Simekb1d70c62010-01-22 10:24:06 +0100657 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
Michal Simekca545022009-05-26 16:30:21 +0200658 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 659 andi r11, r11, _TIF_NEED_RESCHED;
 660 beqi r11, 5f;
 661
 662/* Call the scheduler before returning from a syscall/trap. */
 663 bralid r15, schedule; /* Call scheduler */
 664 nop; /* delay slot */
 665
 666 /* Maybe handle a signal */
Michal Simekb1d70c62010-01-22 10:24:06 +01006675: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
Michal Simekca545022009-05-26 16:30:21 +0200668 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 669 andi r11, r11, _TIF_SIGPENDING;
 670 beqi r11, 1f; /* No signal pending - skip straight to restore */
 671
 672 /*
 673 * Handle a signal return; Pending signals should be in r18.
 674 *
 675 * Not all registers are saved by the normal trap/interrupt entry
 676 * points (for instance, call-saved registers (because the normal
 677 * C-compiler calling sequence in the kernel makes sure they're
 678 * preserved), and call-clobbered registers in the case of
 679 * traps), but signal handlers may want to examine or change the
 680 * complete register state. Here we save anything not saved by
 681 * the normal entry sequence, so that it may be safely restored
Michal Simek36f60952010-06-22 13:27:43 +0200682 * (in a possibly modified form) after do_signal returns. */
Michal Simekca545022009-05-26 16:30:21 +0200683 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
Michal Simekca545022009-05-26 16:30:21 +0200684 addi r7, r0, 0; /* Arg 3: int in_syscall */
 685 bralid r15, do_signal; /* Handle any signals */
Michal Simek841d6e82010-01-22 14:28:36 +0100686 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
Michal Simekca545022009-05-26 16:30:21 +0200687
 688/* Finally, return to user state. */
 6891: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
Michal Simek8633beb2010-02-22 13:24:43 +0100690 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
Michal Simekca545022009-05-26 16:30:21 +0200691 VM_OFF;
 692 tophys(r1,r1);
 693
Michal Simekca545022009-05-26 16:30:21 +0200694 RESTORE_REGS;
 695 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
 696
 697 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
 698 bri 6f;
 699/* Return to kernel state. */
 7002: VM_OFF;
 701 tophys(r1,r1);
Michal Simekca545022009-05-26 16:30:21 +0200702 RESTORE_REGS;
 703 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
 704
 705 tovirt(r1,r1);
 7066:
 707EXC_return: /* Make global symbol for debugging */
 708 rtbd r14, 0; /* Instructions to return from an IRQ */
 709 nop;
710
711/*
 712 * HW EXCEPTION routine end
713 */
714
715/*
716 * Hardware maskable interrupts.
717 *
718 * The stack-pointer (r1) should have already been saved to the memory
719 * location PER_CPU(ENTRY_SP).
720 */
/* Maskable-IRQ entry: save state on the proper kernel stack, call
 * do_IRQ(regs) in virtual mode, then return via ret_from_irq (with
 * reschedule/signal checks for user mode and optional kernel preemption). */
 721C_ENTRY(_interrupt):
 722/* MS: we are in physical address */
 723/* Save registers, switch to proper stack, convert SP to virtual.*/
 724 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
 725 swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 726 /* MS: See if already in kernel mode. */
 727 lwi r11, r0, TOPHYS(PER_CPU(KM));
 728 beqi r11, 1f; /* MS: Jump ahead if coming from user */
 729
 730/* Kernel-mode state save. */
 731 or r11, r1, r0
 732 tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
 733/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
 734 swi r11, r1, (PT_R1 - PT_SIZE);
 735/* MS: restore r11 because of saving in SAVE_REGS */
 736 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 737 /* save registers */
 738/* MS: Make room on the stack -> activation record */
 739 addik r1, r1, -STATE_SAVE_SIZE;
Michal Simekca545022009-05-26 16:30:21 +0200740 SAVE_REGS
 741 /* MS: store mode */
 742 addi r11, r0, 1; /* MS: Was in kernel-mode. */
 743 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
 744 brid 2f;
 745 nop; /* MS: Fill delay slot */
 746
 7471:
 748/* User-mode state save. */
 749/* MS: restore r11 -> FIXME move before SAVE_REG */
 750 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 751 /* MS: get the saved current */
 752 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 753 tophys(r1,r1);
 754 lwi r1, r1, TS_THREAD_INFO;
 755 addik r1, r1, THREAD_SIZE;
 756 tophys(r1,r1);
 757 /* save registers */
 758 addik r1, r1, -STATE_SAVE_SIZE;
Michal Simekca545022009-05-26 16:30:21 +0200759 SAVE_REGS
 760 /* calculate mode */
 761 swi r0, r1, PTO + PT_MODE;
 762 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 763 swi r11, r1, PTO+PT_R1;
 764 /* setup kernel mode to KM */
 765 addi r11, r0, 1;
 766 swi r11, r0, TOPHYS(PER_CPU(KM));
 767
 7682:
Michal Simekb1d70c62010-01-22 10:24:06 +0100769 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
Michal Simekca545022009-05-26 16:30:21 +0200770 swi r0, r1, PTO + PT_R0;
 771 tovirt(r1,r1)
 772 la r5, r1, PTO; /* Arg 0 for do_IRQ: struct pt_regs * */
 773 set_vms;
 774 la r11, r0, do_IRQ;
 775 la r15, r0, irq_call; /* return address lands at ret_from_irq */
 776irq_call:rtbd r11, 0;
 777 nop;
 778
 779/* MS: we are in virtual mode */
 780ret_from_irq:
 781 lwi r11, r1, PTO + PT_MODE;
 782 bnei r11, 2f;
 783
Michal Simekb1d70c62010-01-22 10:24:06 +0100784 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
Michal Simekca545022009-05-26 16:30:21 +0200785 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
 786 andi r11, r11, _TIF_NEED_RESCHED;
 787 beqi r11, 5f
 788 bralid r15, schedule;
 789 nop; /* delay slot */
 790
 791 /* Maybe handle a signal */
Michal Simekb1d70c62010-01-22 10:24:06 +01007925: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
Michal Simekca545022009-05-26 16:30:21 +0200793 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 794 andi r11, r11, _TIF_SIGPENDING;
 795 beqid r11, no_intr_resched
 796/* Handle a signal return; Pending signals should be in r18. */
 797 addi r7, r0, 0; /* Arg 3: int in_syscall */
 798 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
 799 bralid r15, do_signal; /* Handle any signals */
 800 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
 801
 802/* Finally, return to user state. */
 803no_intr_resched:
 804 /* Disable interrupts, we are now committed to the state restore */
 805 disable_irq
 806 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
Michal Simek8633beb2010-02-22 13:24:43 +0100807 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
Michal Simekca545022009-05-26 16:30:21 +0200808 VM_OFF;
 809 tophys(r1,r1);
Michal Simekca545022009-05-26 16:30:21 +0200810 RESTORE_REGS
 811 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
 812 lwi r1, r1, PT_R1 - PT_SIZE;
 813 bri 6f;
 814/* MS: Return to kernel state. */
Michal Simek77753792010-01-12 09:55:10 +01008152:
 816#ifdef CONFIG_PREEMPT
Michal Simekb1d70c62010-01-22 10:24:06 +0100817 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
Michal Simek77753792010-01-12 09:55:10 +0100818 /* MS: get preempt_count from thread info */
 819 lwi r5, r11, TI_PREEMPT_COUNT;
 820 bgti r5, restore; /* preemption disabled - skip */
 821
 822 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
 823 andi r5, r5, _TIF_NEED_RESCHED;
 824 beqi r5, restore /* if zero jump over */
 825
 826preempt:
 827 /* interrupts are off that's why I am calling preempt_schedule_irq */
 828 bralid r15, preempt_schedule_irq
 829 nop
Michal Simekb1d70c62010-01-22 10:24:06 +0100830 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
Michal Simek77753792010-01-12 09:55:10 +0100831 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
 832 andi r5, r5, _TIF_NEED_RESCHED;
 833 bnei r5, preempt /* if non zero jump to resched */
 834restore:
 835#endif
 836 VM_OFF /* MS: turn off MMU */
Michal Simekca545022009-05-26 16:30:21 +0200837 tophys(r1,r1)
Michal Simekca545022009-05-26 16:30:21 +0200838 RESTORE_REGS
 839 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
 840 tovirt(r1,r1);
 8416:
 842IRQ_return: /* MS: Make global symbol for debugging */
 843 rtid r14, 0
 844 nop
845
846/*
847 * `Debug' trap
848 * We enter dbtrap in "BIP" (breakpoint) mode.
849 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
850 * original dbtrap.
851 * however, wait to save state first
852 */
853C_ENTRY(_debug_exception):
854 /* BIP bit is set on entry, no interrupts can occur */
855 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
856
857 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
858 set_bip; /*equalize initial state for all possible entries*/
859 clear_eip;
860 enable_irq;
861 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
862 beqi r11, 1f; /* Jump ahead if coming from user */
863 /* Kernel-mode state save. */
864 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
865 tophys(r1,r11);
866 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
867 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
868
869 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
Michal Simekca545022009-05-26 16:30:21 +0200870 SAVE_REGS;
871
872 addi r11, r0, 1; /* Was in kernel-mode. */
873 swi r11, r1, PTO + PT_MODE;
874 brid 2f;
875 nop; /* Fill delay slot */
8761: /* User-mode state save. */
877 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
878 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
879 tophys(r1,r1);
880 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
881 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
882 tophys(r1,r1);
883
884 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
Michal Simekca545022009-05-26 16:30:21 +0200885 SAVE_REGS;
886
887 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
888 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
889 swi r11, r1, PTO+PT_R1; /* Store user SP. */
890 addi r11, r0, 1;
891 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
Michal Simekb1d70c62010-01-22 10:24:06 +01008922: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
Michal Simekca545022009-05-26 16:30:21 +0200893 /* Save away the syscall number. */
894 swi r0, r1, PTO+PT_R0;
895 tovirt(r1,r1)
896
 /* Call send_sig(SIGTRAP, current, 0) to notify the traced task. */
897 addi r5, r0, SIGTRAP /* Arg 1: send the trap signal */
898 add r6, r0, CURRENT_TASK; /* Arg 2: current task pointer */
899 addk r7, r0, r0 /* Arg 3: zero */
900
901 set_vms;
902 la r11, r0, send_sig;
903 la r15, r0, dbtrap_call;
904dbtrap_call: rtbd r11, 0; /* branch to send_sig, leaving break mode */
905 nop;
906
 /* send_sig returns here: r15 = dbtrap_call, callee returns to r15 + 8 */
907 set_bip; /* Ints masked for state restore*/
908 lwi r11, r1, PTO+PT_MODE; /* non-zero PT_MODE means kernel mode */
909 bnei r11, 2f;
910
911 /* Get thread info of current task into r11 */
Michal Simekb1d70c62010-01-22 10:24:06 +0100912 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
Michal Simekca545022009-05-26 16:30:21 +0200913 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
914 andi r11, r11, _TIF_NEED_RESCHED;
915 beqi r11, 5f; /* no resched needed - check signals */
916
917/* Call the scheduler before returning from a syscall/trap. */
918
919 bralid r15, schedule; /* Call scheduler */
920 nop; /* delay slot */
921 /* XXX Is PT_DTRACE handling needed here? */
922 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
923
924 /* Maybe handle a signal */
Michal Simekb1d70c62010-01-22 10:24:06 +01009255: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
Michal Simekca545022009-05-26 16:30:21 +0200926 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
927 andi r11, r11, _TIF_SIGPENDING;
928 beqi r11, 1f; /* no signals pending - skip straight to user return */
929
930/* Handle a signal return; Pending signals should be in r18. */
931 /* Not all registers are saved by the normal trap/interrupt entry
932 points (for instance, call-saved registers (because the normal
933 C-compiler calling sequence in the kernel makes sure they're
934 preserved), and call-clobbered registers in the case of
935 traps), but signal handlers may want to examine or change the
936 complete register state. Here we save anything not saved by
937 the normal entry sequence, so that it may be safely restored
938 (in a possibly modified form) after do_signal returns. */
939
940 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
Michal Simekca545022009-05-26 16:30:21 +0200941 addi r7, r0, 0; /* Arg 3: int in_syscall */
942 bralid r15, do_signal; /* Handle any signals */
Michal Simek841d6e82010-01-22 14:28:36 +0100943 add r6, r0, r0; /* Arg 2: sigset_t *oldset (in delay slot) */
Michal Simekca545022009-05-26 16:30:21 +0200944
945
946/* Finally, return to user state. */
9471: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
Michal Simek8633beb2010-02-22 13:24:43 +0100948 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
Michal Simekca545022009-05-26 16:30:21 +0200949 VM_OFF;
950 tophys(r1,r1);
951
Michal Simekca545022009-05-26 16:30:21 +0200952 RESTORE_REGS
953 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
954
955
956 lwi r1, r1, PT_R1 - PT_SIZE;
957 /* Restore user stack pointer. */
958 bri 6f;
959
960/* Return to kernel state. */
9612: VM_OFF;
962 tophys(r1,r1);
Michal Simekca545022009-05-26 16:30:21 +0200963 RESTORE_REGS
964 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
965
966 tovirt(r1,r1);
9676:
968DBTRAP_return: /* Make global symbol for debugging */
969 rtbd r14, 0; /* Return from debug/break trap */
970 nop;
971
972
973
974ENTRY(_switch_to)
 /*
  * Context switch: r3 = _switch_to(r5, r6)
  * In:  r5 = previous thread_info, r6 = next thread_info
  *      (the TI_CPU_CONTEXT/TI_TASK offsets applied to r5/r6 below
  *      show both are struct thread_info pointers)
  * Out: r3 = outgoing CURRENT_TASK (task_struct of the previous task)
  */
975 /* prepare return value */
Michal Simekb1d70c62010-01-22 10:24:06 +0100976 addk r3, r0, CURRENT_TASK
Michal Simekca545022009-05-26 16:30:21 +0200977
978 /* save registers in cpu_context */
979 /* use r11 and r12, volatile registers, as temp register */
980 /* r11 = start of cpu_context of the previous process */
981 addik r11, r5, TI_CPU_CONTEXT
982 swi r1, r11, CC_R1
983 swi r2, r11, CC_R2
984 /* skip volatile registers.
985 * they are saved on stack when we jumped to _switch_to() */
986 /* dedicated registers */
987 swi r13, r11, CC_R13
988 swi r14, r11, CC_R14
989 swi r15, r11, CC_R15
990 swi r16, r11, CC_R16
991 swi r17, r11, CC_R17
992 swi r18, r11, CC_R18
993 /* save non-volatile registers */
994 swi r19, r11, CC_R19
995 swi r20, r11, CC_R20
996 swi r21, r11, CC_R21
997 swi r22, r11, CC_R22
998 swi r23, r11, CC_R23
999 swi r24, r11, CC_R24
1000 swi r25, r11, CC_R25
1001 swi r26, r11, CC_R26
1002 swi r27, r11, CC_R27
1003 swi r28, r11, CC_R28
1004 swi r29, r11, CC_R29
1005 swi r30, r11, CC_R30
1006 /* special purpose registers */
 /* NOTE(review): EAR/ESR/FSR are saved here but only FSR and MSR are
  * restored below - presumably EAR/ESR are status-only; confirm. */
1007 mfs r12, rmsr
1008 nop
1009 swi r12, r11, CC_MSR
1010 mfs r12, rear
1011 nop
1012 swi r12, r11, CC_EAR
1013 mfs r12, resr
1014 nop
1015 swi r12, r11, CC_ESR
1016 mfs r12, rfsr
1017 nop
1018 swi r12, r11, CC_FSR
1019
Michal Simekb1d70c62010-01-22 10:24:06 +01001020 /* update CURRENT_TASK (r31) with the task that will run next */
1021 lwi CURRENT_TASK, r6, TI_TASK
Michal Simekca545022009-05-26 16:30:21 +02001022 /* store it to current_save too */
Michal Simekb1d70c62010-01-22 10:24:06 +01001023 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
Michal Simekca545022009-05-26 16:30:21 +02001024
1025 /* get new process' cpu context and restore */
1026 /* r11 = start of cpu_context of the next task */
1027 addik r11, r6, TI_CPU_CONTEXT
1028
1029 /* non-volatile registers */
1030 lwi r30, r11, CC_R30
1031 lwi r29, r11, CC_R29
1032 lwi r28, r11, CC_R28
1033 lwi r27, r11, CC_R27
1034 lwi r26, r11, CC_R26
1035 lwi r25, r11, CC_R25
1036 lwi r24, r11, CC_R24
1037 lwi r23, r11, CC_R23
1038 lwi r22, r11, CC_R22
1039 lwi r21, r11, CC_R21
1040 lwi r20, r11, CC_R20
1041 lwi r19, r11, CC_R19
1042 /* dedicated registers */
1043 lwi r18, r11, CC_R18
1044 lwi r17, r11, CC_R17
1045 lwi r16, r11, CC_R16
1046 lwi r15, r11, CC_R15
1047 lwi r14, r11, CC_R14
1048 lwi r13, r11, CC_R13
1049 /* skip volatile registers */
1050 lwi r2, r11, CC_R2
1051 lwi r1, r11, CC_R1
1052
1053 /* special purpose registers */
1054 lwi r12, r11, CC_FSR
1055 mts rfsr, r12
1056 nop
1057 lwi r12, r11, CC_MSR
1058 mts rmsr, r12
1059 nop
1060
 /* return into the new task via its saved r15 (skip delay slot pair) */
1061 rtsd r15, 8
1062 nop
1063
1064ENTRY(_reset)
 /* Soft reset: transfer control back to the FS-BOOT bootloader entry. */
1065 brai 0x70; /* Jump back to FS-boot */
1066
1067ENTRY(_break)
 /*
  * NMI/break handler: dump MSR and ESR to fixed scratch slots in
  * r0_ram (0x250/0x254) for post-mortem inspection, then spin forever
  * (bri 0 branches to itself).
  */
1068 mfs r5, rmsr
1069 nop
1070 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1071 mfs r5, resr
1072 nop
1073 swi r5, r0, 0x254 + TOPHYS(r0_ram)
1074 bri 0
1075
1076 /* These are compiled and loaded into high memory, then
1077 * copied into place in mach_early_setup */
1078 .section .init.ivt, "ax"
1079 .org 0x0
1080 /* this is very important - here is the reset vector */
1081 /* in current MMU branch you don't care what is here - it is
1082 * used from bootloader site - but this is correct for FS-BOOT */
1083 brai 0x70
1084 nop
 /* Fixed MicroBlaze vector slots follow: syscall, interrupt,
  * break (NMI) and hardware exception, with debug at .org 0x60. */
1085 brai TOPHYS(_user_exception); /* syscall handler */
1086 brai TOPHYS(_interrupt); /* Interrupt handler */
1087 brai TOPHYS(_break); /* nmi trap handler */
1088 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1089
1090 .org 0x60
1091 brai TOPHYS(_debug_exception); /* debug trap handler*/
1092
1093.section .rodata,"a"
 /* sys_call_table lives in syscall_table.S; the size symbol below is
  * the byte length of the table measured from its start. */
1094#include "syscall_table.S"
1095
1096syscall_table_size=(.-sys_call_table)
1097
 /* NUL-terminated tag strings displayed by the stack unwinder. */
Steven J. Magnanice3266c2010-04-27 12:37:54 -05001098type_SYSCALL:
1099 .ascii "SYSCALL\0"
1100type_IRQ:
1101 .ascii "IRQ\0"
1102type_IRQ_PREEMPT:
1103 .ascii "IRQ (PREEMPTED)\0"
1104type_SYSCALL_PREEMPT:
1105 .ascii " SYSCALL (PREEMPTED)\0"
1106
1107 /*
1108 * Trap decoding for stack unwinder
1109 * Tuples are (start addr, end addr, string)
1110 * If return address lies on [start addr, end addr],
1111 * unwinder displays 'string'
1112 */
1113
1114 .align 4
1115.global microblaze_trap_handlers
1116microblaze_trap_handlers:
 /* Each row is three words: start label, end label, tag string. */
1117 /* Exact matches come first */
1118 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1119 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1120 /* Fuzzy matches go here */
1121 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1122 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1123 /* End of table */
1124 .word 0 ; .word 0 ; .word 0