/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 bytes, the space for args */

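/*
 * Each state save frame therefore looks like this: the STATE_SAVE_ARG_SPACE
 * scratch/argument area sits at the lowest addresses (starting at r1), and
 * the struct pt_regs starts PTO bytes above r1, so saved registers are
 * always addressed as PTO+PT_xxx(r1).
 */
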
#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when using a MicroBlaze version that
 * allows msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	nop
	msrclr	r0, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	nop
	msrset	r0, MSR_VMS
	nop
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	nop
	.endm
#endif

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save*/
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:
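/* Note: VM_ON/VM_OFF only program the UMS/VMS "save" bits and then rted to
 * the following local label; the return-from-exception copies the save bits
 * into UM/VM, so the mode switch takes effect exactly at the branch target. */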

#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;					\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;
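/* Note: SAVE_REGS deliberately skips r1 (each entry path stores it as PT_R1
 * itself) and r16/r17, which the MicroBlaze ABI reserves for the break and
 * hardware-exception return addresses; the exception entries store r17 as
 * PT_PC separately. */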

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

#define SAVE_STATE	\
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	nop;								\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;							\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r1);							\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PTO+PT_MODE;					\
1:	/* User-mode state save. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */		\
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));

.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
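/* For reference, a user-space caller typically reaches this entry with a
 * sequence along these lines (illustrative sketch only; the exact code is
 * up to libc):
 *
 *	addik	r12, r0, __NR_xxx	(syscall number)
 *	brki	r14, 0x08		(vectors to _user_exception)
 *	nop
 *
 * with the arguments already loaded into r5-r10 and the result returned
 * in r3.
 */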
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save - kernel execve */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save.  */
1:
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r0, r1, PTO + PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.  The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls and store them to r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0


/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	lwi	r11, r1, PTO + PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state.  */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway).  */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

C_ENTRY(sys_execve):
	addik	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	addik	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * so that we can find where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	rted	r0, full_exception
	nop

/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save the r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as a temp register if MSR
	 * instructions are not used. We don't need to do this if MSR
	 * instructions are used, since they use r0 instead of r11.
	 * I am using ENTRY_SP, which should primarily be used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, PTO		/* parameter struct pt_regs * regs */

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	rted	r0, do_page_fault
	nop

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	rted	r0, do_page_fault
	ori	r7, r0, 0		/* parameter unsigned long error_code */

/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state.  */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r5, r1, PTO;
	set_vms;
	addik	r11, r0, do_IRQ;
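	/* r15 is set up so that when do_IRQ returns via "rtsd r15, 8" it
	 * lands on ret_from_irq (irq_call + 8). */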
	addik	r15, r0, irq_call;
irq_call:rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  however, wait to save state first
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
	/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS;

	swi	r1, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save.  */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS;

	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
2:
	tovirt(r1,r1)

	set_vms;
	addi	r5, r0, SIGTRAP		/* send the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Get current task ptr into r6 */
	addk	r7, r0, r0		/* 3rd param zero */
dbtrap_call: rtbd	r0, send_sig;
	addik	r15, r0, dbtrap_call;

	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

/* Handle a signal return; Pending signals should be in r18.  */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns.  */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */


/* Finally, return to user state.  */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	 /* Clean up stack space.  */


	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	 /* Clean up stack space.  */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;



ENTRY(_switch_to)
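	/* r5 = thread_info of the previous task, r6 = thread_info of the
	 * next task (as handed over by the switch_to() macro) */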
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31, the current task pointer, to the task which will run next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me the start of the next task's cpu context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from bootloader site - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If the return address lies in [start addr, end addr],
	 * unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return  ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0               ; .word 0               ; .word 0