/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */
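
/* Illustrative layout of one state save frame, derived from the two defines
 * above (STATE_SAVE_ARG_SPACE is assumed to be the 24 bytes mentioned in the
 * comment), right after an entry path does "addik r1, r1, -STATE_SAVE_SIZE":
 *	r1 + 0			argument save area (STATE_SAVE_ARG_SPACE bytes)
 *	r1 + PTO		struct pt_regs, filled in by SAVE_REGS
 *	r1 + STATE_SAVE_SIZE	stack pointer value before the trap/IRQ
 */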

#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when using a MicroBlaze version that
 * allows MSR ops to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_ums
	msrclr	r11, MSR_UMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS | MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	nop
	.endm
#endif
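
/* Usage note for the macros above (a sketch, not new behaviour): each macro
 * is a self-contained MSR read-modify-write, e.g. disable_irq expands to
 * either "msrclr r11, MSR_IE; nop" or the equivalent mfs/andi/mts sequence,
 * depending on CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR. Both variants use
 * r11 as scratch, so callers must not hold a live value in r11 across them.
 */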

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON \
	set_ums; \
	rted	r0, 2f; \
	nop; \
2:

/* turn off virtual protected mode save and user mode save*/
#define VM_OFF \
	clear_vms_ums; \
	rted	r0, TOPHYS(1f); \
	nop; \
1:

#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */ \
	swi	r3, r1, PTO+PT_R3; \
	swi	r4, r1, PTO+PT_R4; \
	swi	r5, r1, PTO+PT_R5; \
	swi	r6, r1, PTO+PT_R6; \
	swi	r7, r1, PTO+PT_R7; \
	swi	r8, r1, PTO+PT_R8; \
	swi	r9, r1, PTO+PT_R9; \
	swi	r10, r1, PTO+PT_R10; \
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */ \
	swi	r12, r1, PTO+PT_R12; \
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */ \
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */ \
	swi	r15, r1, PTO+PT_R15;	/* Save LP */ \
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */ \
	swi	r19, r1, PTO+PT_R19; \
	swi	r20, r1, PTO+PT_R20; \
	swi	r21, r1, PTO+PT_R21; \
	swi	r22, r1, PTO+PT_R22; \
	swi	r23, r1, PTO+PT_R23; \
	swi	r24, r1, PTO+PT_R24; \
	swi	r25, r1, PTO+PT_R25; \
	swi	r26, r1, PTO+PT_R26; \
	swi	r27, r1, PTO+PT_R27; \
	swi	r28, r1, PTO+PT_R28; \
	swi	r29, r1, PTO+PT_R29; \
	swi	r30, r1, PTO+PT_R30; \
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */ \
	mfs	r11, rmsr;		/* save MSR */ \
	nop; \
	swi	r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR; \
	mts	rmsr, r11; \
	nop; \
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */ \
	lwi	r3, r1, PTO+PT_R3; \
	lwi	r4, r1, PTO+PT_R4; \
	lwi	r5, r1, PTO+PT_R5; \
	lwi	r6, r1, PTO+PT_R6; \
	lwi	r7, r1, PTO+PT_R7; \
	lwi	r8, r1, PTO+PT_R8; \
	lwi	r9, r1, PTO+PT_R9; \
	lwi	r10, r1, PTO+PT_R10; \
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */ \
	lwi	r12, r1, PTO+PT_R12; \
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */ \
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */ \
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */ \
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */ \
	lwi	r19, r1, PTO+PT_R19; \
	lwi	r20, r1, PTO+PT_R20; \
	lwi	r21, r1, PTO+PT_R21; \
	lwi	r22, r1, PTO+PT_R22; \
	lwi	r23, r1, PTO+PT_R23; \
	lwi	r24, r1, PTO+PT_R24; \
	lwi	r25, r1, PTO+PT_R25; \
	lwi	r26, r1, PTO+PT_R26; \
	lwi	r27, r1, PTO+PT_R27; \
	lwi	r28, r1, PTO+PT_R28; \
	lwi	r29, r1, PTO+PT_R29; \
	lwi	r30, r1, PTO+PT_R30; \
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
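/* Illustrative userspace side (not part of this file): libc is expected to
 * put the syscall number in r12 and the arguments in r5-r10, then execute
 * "brki r14, 0x08", which goes through the user-vector slot installed at the
 * end of this file and arrives here with BIP set. r14 then holds the address
 * of the brki itself, hence the "addi r14, r14, 4" below. */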
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save - kernel execve */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save. */
1:
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r0, r1, PTO + PT_MODE;		/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return. [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls and store them to r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif
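	/* The DEBUG block above keeps per-syscall hit counters: with r12
	 * already scaled to 4 * syscall number, the counter for syscall N
	 * lives at r0_ram + 0x400 + 4*N. It is purely a debug aid; nothing
	 * in this file reads the counters back. */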

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0


/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	lwi	r11, r1, PTO + PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signals pending, skip straight to restore */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO		/* Arg 2: parent context */
	add	r8, r0, r0		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall. This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik	r7, r1, PTO;		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

C_ENTRY(sys_execve):
	addik	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	addik	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */

#define SAVE_STATE \
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
	/* See if already in kernel mode.*/ \
	mfs	r1, rmsr; \
	nop; \
	andi	r1, r1, MSR_UMS; \
	bnei	r1, 1f; \
	/* Kernel-mode state save. */ \
	/* Reload kernel stack-ptr. */ \
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
	tophys(r1,r1); \
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ \
	SAVE_REGS \
	swi	r1, r1, PTO+PT_MODE; \
	brid	2f; \
	nop; /* Fill delay slot */ \
1:	/* User-mode state save. */ \
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ \
	tophys(r1,r1); \
	lwi	r1, r1, TS_THREAD_INFO; /* get the thread info */ \
	addik	r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ \
	tophys(r1,r1); \
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ \
	SAVE_REGS \
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */ \
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums; \
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));

C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be stored directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	addik	r12, r0, full_exception
	set_vms;
	rted	r12, 0;
	nop;

/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do this if MSR
	 * instructions are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primarily used only for
	 * stack pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	addik	r7, r1, PTO		/* parameter struct pt_regs * regs */
	addik	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle the situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	addik	r12, r0, do_page_fault
	set_vms;
	rted	r12, 0;	/* interrupts enabled */
	nop;

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	addik	r12, r0, do_page_fault
	set_vms;
	rted	r12, 0;	/* interrupts enabled */
	nop;

/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signals pending, skip straight to restore */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state. Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, non-zero - kernel mode */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r5, r1, PTO;
	set_vms;
	addik	r11, r0, do_IRQ;
	addik	r15, r0, irq_call;
irq_call:	rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 * We enter dbtrap in "BIP" (breakpoint) mode.
 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
 * original dbtrap.
 * However, wait to save state first.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
	/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	swi	r1, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */
2:
	tovirt(r1,r1)

	set_vms;
	addi	r5, r0, SIGTRAP		/* send the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task */
	addk	r7, r0, r0		/* 3rd param zero */
dbtrap_call:	rtbd	r0, send_sig;
	addik	r15, r0, dbtrap_call;

	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	/* Check whether we need to reschedule */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* no signals pending, skip straight to restore */

/* Handle a signal return; Pending signals should be in r18. */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state. Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns. */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */


/* Finally, return to user state. */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */


	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


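/* Context switch. Judging from the TI_CPU_CONTEXT/TI_TASK accesses below,
 * r5 is the previous task's thread_info and r6 the next task's thread_info;
 * the previous task pointer is prepared in r3 as the return value before
 * CURRENT_TASK (r31) is overwritten. */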
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* get start of cpu_context for the previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31 (CURRENT_TASK) with the pointer to the task which will run next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* get the start of the next task's cpu context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
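
	/* Assuming each "brai TOPHYS(...)" above assembles to an imm + brai
	 * pair (8 bytes) and the short "brai 0x70" is padded by the nop, the
	 * vectors land at the usual MicroBlaze offsets: 0x00 reset, 0x08 user
	 * vector (syscalls), 0x10 interrupt, 0x18 break/NMI, 0x20 HW
	 * exception, with the debug trap at 0x60. */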

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If return address lies in [start addr, end addr],
	 * unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap   ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq    ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0            ; .word 0               ; .word 0