blob: cc9885d441d5ea4d565e6f8958a17c798721c0a1 [file] [log] [blame]
Michal Simekca545022009-05-26 16:30:21 +02001/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
Michal Simek11d51362009-12-07 08:21:34 +010034#undef DEBUG
35
/* The size of a state save frame: full pt_regs plus the argument
 * scratch area that sits below it on the kernel stack. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Declare a globally visible, 4-aligned entry point. */
#define C_ENTRY(name)	.globl name; .align 4; name
43
44/*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
/*
 * MSR bit set/clear helpers.  Two implementations are provided:
 * the first uses the optional msrset/msrclr instructions, the second
 * falls back to mfs/mts read-modify-write sequences.
 * Every macro clobbers r11; callers must not hold a live value there.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	/* Clear Break-In-Progress. */
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	/* Set Break-In-Progress (masks further break events). */
	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	/* Clear Exception-In-Progress. */
	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	/* Enable hardware exceptions. */
	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	/* Mask interrupts. */
	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	/* Unmask interrupts. */
	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	/* NOTE(review): this variant sets UMS and clears VMS, while the
	 * non-MSR variant below sets VMS and clears UMS.  The two arms
	 * look inconsistent -- confirm which behaviour is intended. */
	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	/* Select virtual-mode-save: VMS=1, UMS=0 (used before rtbd/rtid
	 * to enter virtual kernel mode). */
	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	/* Clear both save bits: next rt*d returns to real kernel mode. */
	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS
	nop
	msrclr	r11, MSR_UMS
	nop
	.endm
#else
	/* Clear Break-In-Progress (read-modify-write fallback). */
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	/* Set Break-In-Progress. */
	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	/* Clear Exception-In-Progress. */
	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	/* Enable hardware exceptions. */
	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	/* Mask interrupts. */
	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	/* Unmask interrupts. */
	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	/* NOTE(review): sets VMS and clears UMS -- the opposite of the
	 * msrset/msrclr variant above.  Confirm which is intended. */
	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	/* Select virtual-mode-save: VMS=1, UMS=0. */
	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	/* Clear both save bits: next rt*d returns to real kernel mode. */
	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	nop
	.endm
#endif
175
176/* Define how to call high-level functions. With MMU, virtual mode must be
177 * enabled when calling the high-level function. Clobbers R11.
178 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
179 */
180
/* turn on virtual protected mode save.  The rted branches to the
 * local label while loading MSR from the *S save bits; clobbers r11. */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
2: nop;

/* turn off virtual protected mode save and user mode save.  The branch
 * target must be a physical address, hence TOPHYS; clobbers r11. */
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f);	\
1: nop;

/* Store the general-purpose state into the pt_regs frame at r1+PTO.
 * r3/r4 (return-value regs) and r1 (SP) are deliberately NOT saved here;
 * each entry path stores them separately.  r11 must already hold the
 * value to be saved -- this macro clobbers it afterwards for the MSR. */
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;

/* Inverse of SAVE_REGS: reload MSR first, then the GP registers.
 * r3/r4 and r1 are restored separately by each exit path. */
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
254
255.text
256
257/*
258 * User trap.
259 *
260 * System calls are handled here.
261 *
262 * Syscall protocol:
263 * Syscall number in r12, args in r5-r10
264 * Return value in r3
265 *
266 * Trap entered via brki instruction, so BIP bit is set, and interrupts
267 * are masked. This is nice, means we don't have to CLI before state save
268 */
/*
 * System call entry point (reached via brki, so BIP is set and
 * interrupts are masked; no need to disable them before state save).
 * Syscall number in r12, args in r5-r10, result returned in r3.
 * Runs in physical mode until the rtid below switches to virtual mode.
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
/* Kernel-mode state save: reuse the kernel stack we trapped on. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE;	/* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save: switch to this task's kernel stack. */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;		/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return (ret_from_trap - 8, to compensate for rtsd r15, 8). */

	/* Step into virtual mode. */
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	/* If any tracing/audit flag is set, notify the tracer first. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS		/* default result while traced */
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0	/* arg: &regs (delay slot) */

	/* do_syscall_trace_enter returns the new syscall nr. */
	addk	r12, r0, r3
	/* reload args - the tracer may have modified them */
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr (num * 4) */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls: bump a per-syscall counter kept in r0_ram. */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	/* Find and jump into the syscall handler. */
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0	/* delay slot */
374
375
Michal Simek23575482009-08-24 13:26:04 +0200376/* Entry point used to return from a syscall/trap */
Michal Simekca545022009-05-26 16:30:21 +0200377/* We re-enable BIP bit before state restore */
/* Entry point used to return from a syscall/trap.
 * BIP is re-enabled before the state restore so no break can interrupt it.
 * Expects the syscall result in r3 and the pt_regs frame at r1+PTO. */
C_ENTRY(ret_from_trap):
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;

	/* Preserve the syscall result across the C calls below. */
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f			/* no tracing work - skip */

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0	/* arg: &regs (delay slot) */
1:
	/* Reschedule if the need-resched flag is set. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending - skip */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	nop;

/* Finally, return to user state. */
1:
	lwi	r3, r1, PTO + PT_R3;	/* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
446
447
448/* These syscalls need access to the struct pt_regs on the stack, so we
449 implement them in assembly (they're basically all wrappers anyway). */
450
/* sys_fork needs access to pt_regs, so it is wrapped here: build the
 * do_fork() argument list (flags, child SP, parent regs, three unused
 * args) and tail-call into the generic implementation.
 * Fix: arg 3 used `add r8. r0, r0' -- a period instead of a comma,
 * which is not a valid operand list. */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	nop;				/* delay slot */
460
/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;			/* delay slot */
473
/* vfork wrapper: pass the pt_regs frame to microblaze_vfork.
 * The `la' executes in the brid delay slot, so r5 is set before the
 * tail-call takes effect. */
C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO		/* Arg 0: parent context (delay slot) */
Michal Simekca545022009-05-26 16:30:21 +0200477
/* clone wrapper: default the child SP to the parent's when the caller
 * passed 0, then tail-call do_fork with the pt_regs frame as arg 2. */
C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
1:	la	r7, r1, PTO;		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	nop;				/* delay slot */
487
/* execve wrapper: microblaze_execve needs the user context, so append
 * the pt_regs frame as a 4th argument before tail-calling. */
C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;				/* delay slot */
492
/* rt_sigreturn wrapper: sys_rt_sigreturn rewrites pt_regs from the
 * signal frame, so r3/r4 are spilled into the frame around the call
 * and reloaded afterwards. */
C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 into pt_regs */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;				/* delay slot */
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;
503
/*
 * HW EXCEPTION routine start
 */
507
/* Common state-save sequence for the HW exception entry points below.
 * Normalises MSR (BIP set, EIP cleared, IRQs+EE enabled), picks the
 * kernel stack (current one if trapping from kernel, the task's kernel
 * stack if from user), saves r3/r4 separately (exception paths restore
 * them separately), runs SAVE_REGS, records the trap PC from r17, and
 * sets PT_MODE/KM.  Leaves r1 as a virtual pointer to the frame. */
#define SAVE_STATE \
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/*equalize initial state for all possible entries*/\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;	/* Jump ahead if coming from user */	\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	/* store return registers separately because			\
	 * this macro is used for other exceptions too */		\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1;		/* Was in kernel-mode. */	\
	swi	r11, r1, PTO+PT_MODE;					\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save. */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	/* store return registers separately because this macro	\
	 * is used for other exceptions too */				\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE;		/* Was in user-mode. */	\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */	\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	\
	/* Save away the syscall number. */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)
562
/* Generic HW exception entry: save state, gather ESR/FSR, and dispatch
 * to the C handler full_exception(regs, esr, fsr) in virtual mode.
 * Returns via ret_from_exc (r15 pre-biased by -8 for rtsd r15, 8). */
C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME this can be stored directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
	mfs	r6, resr		/* exception status */
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;		/* enter handler in virtual mode */
	nop;
584
585/*
586 * Unaligned data trap.
587 *
588 * Unaligned data trap last on 4k page is handled here.
589 *
590 * Trap entered via exception, so EE bit is set, and interrupts
591 * are masked. This is nice, means we don't have to CLI before state save
592 *
593 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
594 */
/* Unaligned-data exception entry: save state and dispatch to
 * _unaligned_data_exception(esr, ear, ..., regs) in virtual mode
 * (assembler routine in arch/microblaze/kernel/hw_exception_handler.S).
 * EE was set on entry so interrupts were already masked. */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
609
610/*
611 * Page fault traps.
612 *
613 * If the real exception handler (from hw_exception_handler.S) didn't find
614 * the mapping for the process, then we're thrown here to handle such situation.
615 *
616 * Trap entered via exceptions, so EE bit is set, and interrupts
617 * are masked. This is nice, means we don't have to CLI before state save
618 *
619 * Build a standard exception frame for TLB Access errors. All TLB exceptions
620 * will bail out to this point if they can't resolve the lightweight TLB fault.
621 *
622 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
623 * void do_page_fault(struct pt_regs *regs,
624 * unsigned long address,
625 * unsigned long error_code)
626 */
/* data and instruction traps - which one occurred is resolved in fault.c */
/* Data-side TLB/page-fault entry: save state and dispatch to
 * do_page_fault(regs, address, error_code) in virtual mode, with the
 * faulting address from EAR and the error code from ESR. */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
642
/* Instruction-side TLB/page-fault entry: like page_fault_data_trap but
 * the error code is forced to 0 to mark an instruction fetch fault. */
C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
656
657/* Entry point used to return from an exception. */
/* Entry point used to return from an exception.  Mirrors ret_from_trap,
 * but r3/r4 were saved separately by SAVE_STATE and are restored here. */
C_ENTRY(ret_from_exc):
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;	/* See if returning to kernel mode, */
				/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending - skip */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns.
	 * r3/r4 are stored separately because this macro is used
	 * for other exceptions too. */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	nop;				/* delay slot */

/* Finally, return to user state. */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
726
/*
 * HW EXCEPTION routine end
 */
730
731/*
732 * Hardware maskable interrupts.
733 *
734 * The stack-pointer (r1) should have already been saved to the memory
735 * location PER_CPU(ENTRY_SP).
736 */
/* Hardware maskable interrupt entry.  Entered in physical mode; r1 was
 * already stashed in PER_CPU(ENTRY_SP) per the comment above.  Saves
 * state, dispatches to do_IRQ(regs) in virtual mode, then returns via
 * ret_from_irq with optional user-mode work (resched/signals) and
 * kernel preemption handling. */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f;	/* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
	swi	r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
 * this macro is used for other exceptions too */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1;		/* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE;	/* MS: and save it */
	brid	2f;
	nop;				/* MS: Fill delay slot */

1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;	/* top of the task's kernel stack */
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r3, r1, PTO+PT_R3;
	swi	r4, r1, PTO+PT_R4;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;	/* 0 = was in user mode */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* store user SP */
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;		/* arg: struct pt_regs *regs */
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;	/* return address for the handler */
irq_call:rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* returning to kernel mode */

	/* Returning to user mode: maybe reschedule. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched	/* no signals pending - skip */
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall (delay slot) */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset (delay slot) */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM);	/* MS: Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO + PT_R3;	/* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* restore user stack pointer */
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;		/* preemption disabled - skip */

	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore		/* if zero jump over */

preempt:
	/* interrupts are off, that's why preempt_schedule_irq is called */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt		/* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	lwi	r3, r1, PTO + PT_R3;	/* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return:		/* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
871
872/*
873 * `Debug' trap
874 * We enter dbtrap in "BIP" (breakpoint) mode.
875 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
876 * original dbtrap.
877 * however, wait to save state first
878 */
/*
 * _debug_exception - hardware debug ("dbtrap") vector handler.
 *
 * Entered in breakpoint mode (BIP set, interrupts off) with physical
 * addressing. Builds a pt_regs state-save frame on the kernel stack
 * (separate paths for kernel-mode and user-mode entry, selected via the
 * per-CPU KM flag), delivers SIGTRAP to the current task via send_sig(),
 * then optionally reschedules / runs signal handling before restoring
 * the frame and returning with rtbd.
 */
879C_ENTRY(_debug_exception):
 880	/* BIP bit is set on entry, no interrupts can occur */
 881	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
 882
	/* r11 is needed as scratch; park it in a fixed slot in r0_ram */
 883	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
 884	set_bip;	/*equalize initial state for all possible entries*/
 885	clear_eip;
 886	enable_irq;
 887	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
 888	beqi	r11, 1f;	/* Jump ahead if coming from user */
 889	/* Kernel-mode state save: reuse the kernel stack we trapped on. */
 890	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
 891	tophys(r1,r11);
 892	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
 893	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
 894
 895	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 896	swi	r3, r1, PTO + PT_R3;
 897	swi	r4, r1, PTO + PT_R4;
 898	SAVE_REGS;
 899
	/* PT_MODE != 0 marks the frame as a kernel-mode entry (checked on exit) */
 900	addi	r11, r0, 1; /* Was in kernel-mode. */
 901	swi	r11, r1, PTO + PT_MODE;
 902	brid	2f;
 903	nop;	/* Fill delay slot */
9041:	/* User-mode state save: switch to this task's kernel stack. */
 905	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
 906	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
 907	tophys(r1,r1);
 908	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
 909	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
 910	tophys(r1,r1);
 911
 912	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 913	swi	r3, r1, PTO + PT_R3;
 914	swi	r4, r1, PTO + PT_R4;
 915	SAVE_REGS;
 916
 917	swi	r0, r1, PTO+PT_MODE; /* PT_MODE == 0: was in user-mode. */
 918	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 919	swi	r11, r1, PTO+PT_R1; /* Store user SP. */
 920	addi	r11, r0, 1;
 921	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
Michal Simekb1d70c62010-01-22 10:24:06 +01009222:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
Michal Simekca545022009-05-26 16:30:21 +0200923	/* Save away the syscall number. */
 924	swi	r0, r1, PTO+PT_R0;
 925	tovirt(r1,r1)
 926
	/* send_sig(SIGTRAP, current, 0) */
 927	addi	r5, r0, SIGTRAP		 /* Arg 1: the trap signal */
 928	add	r6, r0, CURRENT_TASK; /* Arg 2: current task ptr (in r6) */
 929	addk	r7, r0, r0 /* Arg 3: zero */
 930
	/* turn the MMU back on and call send_sig via rtbd (clears BIP);
	 * r15 is set so send_sig returns to dbtrap_call */
 931	set_vms;
 932	la	r11, r0, send_sig;
 933	la	r15, r0, dbtrap_call;
934dbtrap_call:	rtbd	r11, 0;
 935	nop;
 936
 937	set_bip; /*  Ints masked for state restore*/
 938	lwi	r11, r1, PTO+PT_MODE;
 939	bnei	r11, 2f;	/* kernel-mode entry: skip resched/signals */
 940
 941	/* Get thread info ptr into r11 */
Michal Simekb1d70c62010-01-22 10:24:06 +0100942	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
Michal Simekca545022009-05-26 16:30:21 +0200943	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 944	andi	r11, r11, _TIF_NEED_RESCHED;
 945	beqi	r11, 5f;	/* no resched needed - check signals */
 946
 947/* Call the scheduler before returning from a syscall/trap. */
 948
 949	bralid	r15, schedule;	/* Call scheduler */
 950	nop;	/* delay slot */
 951	/* XXX Is PT_DTRACE handling needed here? */
 952	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
 953
 954	/* Maybe handle a signal */
Michal Simekb1d70c62010-01-22 10:24:06 +01009555:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
Michal Simekca545022009-05-26 16:30:21 +0200956	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
 957	andi	r11, r11, _TIF_SIGPENDING;
 958	beqi	r11, 1f;	/* no signals pending - go straight to return */
 959
 960/* Handle a signal return; Pending signals should be in r18. */
 961	/* Not all registers are saved by the normal trap/interrupt entry
 962	   points (for instance, call-saved registers (because the normal
 963	   C-compiler calling sequence in the kernel makes sure they're
 964	   preserved), and call-clobbered registers in the case of
 965	   traps), but signal handlers may want to examine or change the
 966	   complete register state.  Here we save anything not saved by
 967	   the normal entry sequence, so that it may be safely restored
 968	   (in a possibly modified form) after do_signal returns. */
 969
 970	la	r5, r1, PTO;	/* Arg 1: struct pt_regs *regs */
 971	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */
 972	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
 973	bralid	r15, do_signal;	/* Handle any signals */
 974	nop;
 975
 976
 977/* Finally, return to user state.  */
 9781:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
Michal Simek8633beb2010-02-22 13:24:43 +0100979	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
Michal Simekca545022009-05-26 16:30:21 +0200980	VM_OFF;
 981	tophys(r1,r1);
 982
 983	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
 984	lwi	r4, r1, PTO+PT_R4;
 985	RESTORE_REGS
 986	addik	r1, r1, STATE_SAVE_SIZE	 /* Clean up stack space. */
 987
 988
 989	lwi	r1, r1, PT_R1 - PT_SIZE;
 990	/* Restore user stack pointer. */
 991	bri	6f;
 992
/* Return to kernel state: no signal/resched work, just unwind the frame. */
 993/* Return to kernel state.  */
 9942:	VM_OFF;
 995	tophys(r1,r1);
 996	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
 997	lwi	r4, r1, PTO+PT_R4;
 998	RESTORE_REGS
 999	addik	r1, r1, STATE_SAVE_SIZE	 /* Clean up stack space. */
 1000
 1001	tovirt(r1,r1);
 10026:
 1003DBTRAP_return:	/* Make global symbol for debugging */
 1004	rtbd	r14, 0; /* return from debug trap, re-enabling BIP-exit */
 1005	nop;
1006
1007
1008
/*
 * _switch_to - switch CPU context between two tasks.
 *
 * In:   r5 = thread_info of the outgoing (previous) task
 *       r6 = thread_info of the incoming (next) task
 * Out:  r3 = task_struct pointer of the previous task
 *            (CURRENT_TASK, i.e. r31, sampled before the switch)
 *
 * Saves callee-saved/dedicated registers plus MSR/EAR/ESR/FSR into the
 * previous task's cpu_context, updates CURRENT_TASK and the per-CPU
 * CURRENT_SAVE slot, then restores the next task's context and returns
 * on the *next* task's r15.
 * NOTE(review): EAR and ESR are saved below but never reloaded on the
 * restore path (only FSR and MSR are) - confirm this is intentional.
 */
1009ENTRY(_switch_to)
 1010	/* prepare return value */
Michal Simekb1d70c62010-01-22 10:24:06 +01001011	addk	r3, r0, CURRENT_TASK
Michal Simekca545022009-05-26 16:30:21 +02001012
 1013	/* save registers in cpu_context */
 1014	/* use r11 and r12, volatile registers, as temp register */
 1015	/* give start of cpu_context for previous process */
 1016	addik	r11, r5, TI_CPU_CONTEXT
 1017	swi	r1, r11, CC_R1
 1018	swi	r2, r11, CC_R2
 1019	/* skip volatile registers.
 1020	 * they are saved on stack when we jumped to _switch_to() */
 1021	/* dedicated registers */
 1022	swi	r13, r11, CC_R13
 1023	swi	r14, r11, CC_R14
 1024	swi	r15, r11, CC_R15
 1025	swi	r16, r11, CC_R16
 1026	swi	r17, r11, CC_R17
 1027	swi	r18, r11, CC_R18
 1028	/* save non-volatile registers */
 1029	swi	r19, r11, CC_R19
 1030	swi	r20, r11, CC_R20
 1031	swi	r21, r11, CC_R21
 1032	swi	r22, r11, CC_R22
 1033	swi	r23, r11, CC_R23
 1034	swi	r24, r11, CC_R24
 1035	swi	r25, r11, CC_R25
 1036	swi	r26, r11, CC_R26
 1037	swi	r27, r11, CC_R27
 1038	swi	r28, r11, CC_R28
 1039	swi	r29, r11, CC_R29
 1040	swi	r30, r11, CC_R30
 1041	/* special purpose registers (mfs needs a following nop) */
 1042	mfs	r12, rmsr
 1043	nop
 1044	swi	r12, r11, CC_MSR
 1045	mfs	r12, rear
 1046	nop
 1047	swi	r12, r11, CC_EAR
 1048	mfs	r12, resr
 1049	nop
 1050	swi	r12, r11, CC_ESR
 1051	mfs	r12, rfsr
 1052	nop
 1053	swi	r12, r11, CC_FSR
 1054
Michal Simekb1d70c62010-01-22 10:24:06 +01001055	/* update r31 (CURRENT_TASK) to point at the task which runs next */
 1056	lwi	CURRENT_TASK, r6, TI_TASK
Michal Simekca545022009-05-26 16:30:21 +02001057	/* store it to current_save too */
Michal Simekb1d70c62010-01-22 10:24:06 +01001058	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
Michal Simekca545022009-05-26 16:30:21 +02001059
 1060	/* get new process' cpu context and restore */
 1061	/* r11 = start of the next task's cpu_context */
 1062	addik	r11, r6, TI_CPU_CONTEXT
 1063
 1064	/* non-volatile registers */
 1065	lwi	r30, r11, CC_R30
 1066	lwi	r29, r11, CC_R29
 1067	lwi	r28, r11, CC_R28
 1068	lwi	r27, r11, CC_R27
 1069	lwi	r26, r11, CC_R26
 1070	lwi	r25, r11, CC_R25
 1071	lwi	r24, r11, CC_R24
 1072	lwi	r23, r11, CC_R23
 1073	lwi	r22, r11, CC_R22
 1074	lwi	r21, r11, CC_R21
 1075	lwi	r20, r11, CC_R20
 1076	lwi	r19, r11, CC_R19
 1077	/* dedicated registers */
 1078	lwi	r18, r11, CC_R18
 1079	lwi	r17, r11, CC_R17
 1080	lwi	r16, r11, CC_R16
 1081	lwi	r15, r11, CC_R15
 1082	lwi	r14, r11, CC_R14
 1083	lwi	r13, r11, CC_R13
 1084	/* skip volatile registers */
 1085	lwi	r2, r11, CC_R2
 1086	lwi	r1, r11, CC_R1
 1087
 1088	/* special purpose registers */
 1089	lwi	r12, r11, CC_FSR
 1090	mts	rfsr, r12
 1091	nop
 1092	lwi	r12, r11, CC_MSR
 1093	mts	rmsr, r12
 1094	nop
 1095
	/* return on the NEXT task's saved r15 - this completes the switch */
 1096	rtsd	r15, 8
 1097	nop
1098
/*
 * _reset - soft reset: jump back to address 0x70, the FS-BOOT
 * bootloader entry point (matches the reset vector below).
 */
1099ENTRY(_reset)
 1100	brai	0x70; /* Jump back to FS-boot */
1101
/*
 * _break - break ("nmi") trap handler.
 * Dumps MSR and ESR into fixed scratch words in r0_ram (offsets 0x250
 * and 0x254) for post-mortem inspection, then parks the CPU:
 * "bri 0" branches to itself, i.e. an infinite loop.
 */
1102ENTRY(_break)
 1103	mfs	r5, rmsr
 1104	nop
 1105	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
 1106	mfs	r5, resr
 1107	nop
 1108	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
 1109	bri	0
1110
/*
 * Exception vector table. Assembled into the .init.ivt section and
 * later copied to its final location; each slot is a branch to the
 * corresponding handler. TOPHYS is used because exceptions are taken
 * with physical addressing.
 */
 1111	/* These are compiled and loaded into high memory, then
 1112	 * copied into place in mach_early_setup */
 1113	.section	.init.ivt, "ax"
 1114	.org	0x0
 1115	/* this is very important - here is the reset vector */
 1116	/* in current MMU branch you don't care what is here - it is
 1117	 * used from bootloader site - but this is correct for FS-BOOT */
 1118	brai	0x70
 1119	nop
 1120	brai	TOPHYS(_user_exception); /* syscall handler */
 1121	brai	TOPHYS(_interrupt);	/* Interrupt handler */
 1122	brai	TOPHYS(_break);		/* nmi trap handler */
 1123	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
 1124
 1125	.org	0x60
 1126	brai	TOPHYS(_debug_exception);	/* debug trap handler*/
1127
/* Emit the syscall dispatch table (sys_call_table) into read-only data. */
1128.section .rodata,"a"
 1129#include "syscall_table.S"
 1130
/* Size of the table in bytes: current location minus its start label. */
 1131syscall_table_size=(.-sys_call_table)
1132