/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

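/*
 * Note on the frame layout implied by the definitions above: every entry
 * path allocates STATE_SAVE_SIZE bytes on the kernel stack, consisting of
 * STATE_SAVE_ARG_SPACE bytes of argument space at the new r1 followed by
 * a struct pt_regs. A register slot is therefore addressed as
 * "r1 + PTO + PT_Rn", e.g. "swi r5, r1, PTO+PT_R5" stores r5 into the
 * pt_regs of the current frame.
 */
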
#define C_ENTRY(name)	.globl name; .align 4; name

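/*
 * For example, "C_ENTRY(_user_exception):" expands to
 * ".globl _user_exception; .align 4; _user_exception:", giving each entry
 * point a global, aligned label.
 */
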
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when using a MicroBlaze version that
 * allows MSR ops to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	.endm
#endif

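/*
 * Both variants above are meant to have the same effect on the MSR bits.
 * With the MSR instructions, msrset/msrclr with rD = r0 simply discard the
 * old MSR value; the fallback variant has to go through mfs/mts on rmsr
 * and therefore clobbers r11 (see the r11 save/restore dance in
 * unaligned_data_trap below).
 */
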
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:

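/*
 * The UMS/VMS bits written by the macros above are only the "save" copies;
 * the rted to the local label is what actually switches the mode, since
 * returning via rted/rtid/rtbd loads UM/VM from UMS/VMS. VM_OFF branches to
 * a TOPHYS() label because, once translation is off, execution continues at
 * physical addresses.
 */
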
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;					\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r16, r1, PTO+PT_R16;					\
	swi	r17, r1, PTO+PT_R17;					\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	swi	r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr, r11;						\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r16, r1, PTO+PT_R16;					\
	lwi	r17, r1, PTO+PT_R17;					\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

#define SAVE_STATE	    \
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode. */				\
	mfs	r1, rmsr;						\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;							\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	/* FIXME: these two lines could be combined into one */	\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -STATE_SAVE_SIZE; */			\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PTO+PT_MODE;					\
1:	/* User-mode state save. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS: these three instructions could be combined into one */	\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -STATE_SAVE_SIZE; */			\
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */		\
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */		\
	/* MS: I am clearing UMS even when we come from kernel space */	\
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));

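/*
 * SAVE_STATE picks the stack to use based on MSR_UMS: if we trapped from
 * kernel mode it reuses the kernel stack saved in ENTRY_SP, otherwise it
 * switches to the kernel stack of the current thread. It also records the
 * entry mode in PT_MODE (zero = user, non-zero = kernel) and leaves
 * CURRENT_TASK loaded from CURRENT_SAVE.
 */
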
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	addi	r14, r14, 4	/* return address is 4 bytes after the call */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */

	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* MS: these three instructions could be combined into one */
	/* addik	r1, r1, THREAD_SIZE; */
	/* tophys(r1,r1); */
	/* addik	r1, r1, -STATE_SAVE_SIZE; */
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
	SAVE_REGS
	swi	r0, r1, PTO + PT_R3
	swi	r0, r1, PTO + PT_R4

	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
	clear_ums;
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

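/*
 * Note on the "-8" adjustment used below: syscall handlers are plain C
 * functions that return with "rtsd r15, 8", i.e. they jump to r15 + 8.
 * Loading r15 with "ret_from_trap - 8" therefore makes the handler return
 * exactly at ret_from_trap.
 */
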
	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;

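/*
 * Each sys_call_table entry is one 32-bit word, so the two adds above
 * multiply the syscall number by four without relying on the (optional)
 * barrel shifter; e.g. syscall number 4 becomes a byte offset of 16.
 */
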
#ifdef DEBUG
	/* Trace syscalls and store them to r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	addi	r3, r0, -ENOSYS;

/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* Signals to handle, handle them */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
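	/* The lwi above reloads the user stack pointer: after adding
	 * STATE_SAVE_SIZE, r1 points just past the frame, so PT_R1 - PT_SIZE
	 * indexes back into the saved PT_R1 slot filled in on entry. */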
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	add	r10, r0, r0;		/* Arg 5: (unused) */

/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	add	r10, r0, r0;		/* Arg 5: (unused) */

C_ENTRY(sys_execve):
	brid	microblaze_execve;	/* Do real work (tail-call). */
	addik	r8, r1, PTO;		/* add user context as 4th arg */

C_ENTRY(sys_rt_sigreturn_wrapper):
	brid	sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, PTO;		/* add user context as 1st arg */

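/*
 * In the wrappers above the instruction in the delay slot of brid/brlid
 * still executes before control reaches the branch target, which is why the
 * argument set-up (e.g. "addik r5, r1, PTO") can legally follow the branch
 * instruction.
 */
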
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * to find out where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* FIXME this could be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr
	mfs	r7, rfsr;	/* save FSR */
	mts	rfsr, r0;	/* Clear sticky fsr */
	rted	r0, full_exception
	addik	r5, r1, PTO		 /* parameter struct pt_regs * regs */

/*
 * Unaligned data trap.
 *
 * An unaligned data trap in the last word of a 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save the r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as a temp register if MSR
	 * instructions are not used. We don't need to do this if MSR
	 * instructions are used, since they use r0 instead of r11.
	 * I am using ENTRY_SP which should primarily be used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	mfs	r4, rear		/* EAR */
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, PTO		/* parameter struct pt_regs * regs */

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a
 * situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	mfs	r7, resr		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */

/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* Signals to handle, handle them */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: r1 now holds the physical address of the stack */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r15, r0, irq_call;
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, PTO;

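/* do_IRQ is a normal C function and returns with "rtsd r15, 8"; since r15
 * was loaded with irq_call above, the return lands at irq_call + 8, which
 * is ret_from_irq below. */
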
/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non-zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * Debug trap for KGDB. _debug_exception is entered via brki r16, 0x18
 * and calls the handling function with the saved pt_regs.
 */
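/*
 * brki r16, 0x18 writes the PC of the break into r16 (hence the "PC and r16
 * are the same" comment below); the handlers store r16 into the PT_PC slot
 * and the return paths use "rtbd r16, 0".
 */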
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */

	/* BIP bit is set on entry, no interrupts can occur */
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PTO+PT_R0;	/* R0 must be saved too */
	swi	r14, r1, PTO+PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PTO+PT_PC; /* PC and r16 are the same */
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PTO+PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PTO+PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PTO+PT_FSR;

	/* the stack pointer is a physical address and has been decreased
	 * by STATE_SAVE_SIZE, but we need to store the correct R1 value */
	addik	r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE;
	swi	r11, r1, PTO+PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, PTO /* pass pt_reg address as the first arg */
	la	r15, r0, dbtrap_call; /* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri 0

/* MS: User-mode state save - gdb */
1:	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;
	swi	r16, r1, PTO+PT_PC;	/* Save PC (r16 is the break address) */
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik	r5, r1, PTO;
	addik	r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip; /* Ints masked for state restore */
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* Signals to handle, handle them */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	 /* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r14, r1, PTO+PT_R14;
	lwi	r16, r1, PTO+PT_PC;
	addik	r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;


ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

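	/* r5 = thread_info of the previous task, r6 = thread_info of the
	 * next task (see the TI_CPU_CONTEXT/TI_TASK accesses below); the
	 * return value in r3 is the task that was running. */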
	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* get the start of cpu_context for the previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on the stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31 (CURRENT_TASK) to point to the task which will run next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* get the start of the next task's cpu_context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
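	/* Conventional MicroBlaze vector layout assumed here: 0x00 reset,
	 * 0x08 user exception/syscall (brki r14, 0x08), 0x10 interrupt,
	 * 0x18 debug trap (brki r16, 0x18), 0x20 HW exception; each brai to
	 * a TOPHYS() target assembles with an imm prefix, so every vector
	 * slot occupies 8 bytes. */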
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If the return address lies in [start addr, end addr],
	 * the unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return  ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0 ; .word 0 ; .word 0
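	/* Example of how the unwinder uses this table: a return address
	 * exactly equal to ret_from_trap matches the first tuple and is
	 * shown as "SYSCALL"; an address inside [ret_from_irq,
	 * no_intr_resched] other than ret_from_irq itself falls through to
	 * the fuzzy "IRQ (PREEMPTED)" entry. */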