/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 * 6/05/00 RZ: added writeback completion after return from sighandler
 *             for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
ENTRY(sys_fork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_fork
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_vfork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_vfork
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts

ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

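| ret_from_kernel_thread is where a new kernel thread starts running after
| its first context switch: schedule_tail() finishes the switch, the payload
| in %a3 is called with %d7 as its argument, and the payload's return value
| is handed to sys_exit.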
ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	GET_CURRENT(%d0)
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	movel	%d0,(%sp)
	jra	sys_exit

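| Return path for a kernel_execve() issued from a kernel thread: the
| argument at 4(%sp) is a pointer to the user-mode pt_regs, which becomes
| the new stack pointer before dropping back through ret_from_exception.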
ENTRY(ret_from_kernel_execve)
	movel	4(%sp), %sp
	GET_CURRENT(%d0)
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

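| Syscall tracing (TIF_SYSCALL_TRACE): syscall_trace is called on entry
| and on exit.  The tracer may rewrite the syscall number in orig_d0, so
| it is reloaded and range-checked again before the table dispatch.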
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

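| System call entry: the syscall number arrives in %d0, the arguments in
| %d1-%d5/%a0.  After the trace and range checks the handler is fetched
| from sys_call_table and its return value is stored back into the saved
| %d0 slot of pt_regs.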
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

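| Slow path out of a system call: %d0 still holds the low word of the
| thread_info flags.  The shifts below dispatch on syscall trace, delayed
| trace, signal/notify work, and finally a pending reschedule.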
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

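| exit_work: %d0 holds the low byte of the thread_info flags; signal and
| notify-resume work goes to do_signal_return, a bare need_resched just
| calls schedule.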
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
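	| auto_irqhandler_fixup marks the address operand of the jsr below
	| so that init code (m68k_setup_auto_interrupt) can patch in a
	| platform handler in place of do_IRQ.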
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack

ret_from_interrupt:
	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
2:	RESTORE_ALL

	ALIGN
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0
	jne	2b

	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception
	jra	do_softirq

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
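	| user_irqvec_fixup marks the immediate operand of the subw below;
	| m68k_setup_user_interrupt() patches it so the hardware vector
	| numbers map onto the platform's user IRQ range.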
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL


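/*
 * resume is the low-level half of switch_to(): it saves the outgoing
 * task's SR, fs, usp, FPU state and kernel stack pointer into its
 * thread_struct, switches %curptr to the new task, and loads the same
 * state back for the incoming task.
 */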
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */