/*
 *  arch/ppc64/kernel/entry.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
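/*
 * With DO_SOFT_DISABLE, the interrupt-enable state is tracked in the
 * paca's PACAPROCENABLED byte rather than solely in MSR_EE; the
 * restore path below checks the lppaca for any events that arrived
 * while we were soft-disabled and calls do_IRQ to deliver them.
 */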

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

.SYS_CALL_TABLE32:
	.tc .sys_call_table32[TC],.sys_call_table32

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
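/*
 * The bytes 0x72 0x65 0x67 0x73 0x68 0x65 0x72 0x65 spell "regshere";
 * this marker is stored just below STACK_FRAME_OVERHEAD in every
 * exception frame (see the "regshere" store in system_call_common).
 */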

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
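	/*
	 * On entry from the 0xc00 system call vector:
	 *   r0       = syscall number
	 *   r3-r8    = syscall arguments
	 *   r9       = caller's r13 (saved into GPR13 below)
	 *   r11, r12 = SRR0 (NIP) and SRR1 (MSR) of the caller
	 *   r13      = this CPU's paca; r1 is still the caller's stack.
	 */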
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	crclr	so
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
	std	r10,SOFTE(r1)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
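	/* The stack frame is now complete and r2 holds the kernel TOC,
	 * so it is safe to take another exception: hard-enable
	 * interrupts (EE was cleared by the exception entry). */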

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	li	r12,0
	ld	r10,TI_FLAGS(r11)
	stb	r12,TI_SC_NOERR(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or the default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
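/*
 * Each sys_call_table entry is an 8-byte function pointer, hence the
 * shift-by-3 indexing below.  For a 32-bit task we switch to the
 * compat table and clear the upper 32 bits of the six argument
 * registers, since the caller only guarantees the low words.
 */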
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	ld	r11,.SYS_CALL_TABLE32@toc(2)
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,3
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
#ifdef SHOW_SYSCALLS
	std	r3,GPR3(r1)
	bl	.do_show_syscall_exit
	ld	r3,GPR3(r1)
#endif
	std	r3,RESULT(r1)
	ld	r5,_CCR(r1)
	li	r10,-_LAST_ERRNO
	cmpld	r3,r10
	clrrdi	r12,r1,THREAD_SHIFT
	bge-	syscall_error
syscall_error_cont:

	/* check for syscall tracing or audit */
	ld	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	bne-	syscall_exit_trace
syscall_exit_trace_cont:

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
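	/* The rldicl/rotldi pair rotates MSR_EE up to the most
	 * significant bit, masks it off and rotates back: a way of
	 * clearing just MSR_EE without a scratch mask register. */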
	ld	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f			/* only restore r13 if */
	ld	r13,GPR13(r1)		/* returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r10,r10,r12
	mtmsrd	r10,1			/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SRR0,r7
	mtspr	SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_enosys:
	li	r3,-ENOSYS
	std	r3,RESULT(r1)
	clrrdi	r12,r1,THREAD_SHIFT
	ld	r5,_CCR(r1)

syscall_error:
	lbz	r11,TI_SC_NOERR(r12)
	cmpwi	0,r11,0
	bne-	syscall_error_cont
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_exit_trace:
	std	r3,GPR3(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	REST_NVGPRS(r1)
	ld	r3,GPR3(r1)
	ld	r5,_CCR(r1)
	clrrdi	r12,r1,THREAD_SHIFT
	b	syscall_exit_trace_cont

/* Stuff to do on exit from a system call. */
syscall_exit_work:
	std	r3,GPR3(r1)
	std	r5,_CCR(r1)
	b	.ret_from_except_lite

/* Save non-volatile GPRs, if not already saved. */
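/* Convention: bit 0 of the _TRAP word is set (e.g. 0xc01 for system
 * calls) while r14-r31 are live only in registers.  save_nvgprs
 * stores them into the frame and clears the bit, so a repeat call on
 * the same frame is a cheap no-op. */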
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_sigsuspend)
	bl	.save_nvgprs
	bl	.sys32_sigsuspend
	b	70f

_GLOBAL(ppc64_rt_sigsuspend)
	bl	.save_nvgprs
	bl	.sys_rt_sigsuspend
	b	70f

_GLOBAL(ppc32_rt_sigsuspend)
	bl	.save_nvgprs
	bl	.sys32_rt_sigsuspend
	/* If sigsuspend() returns zero, we are going into a signal handler */
70:	cmpdi	0,r3,0
	beq	.ret_from_except
	/* If it returned -EINTR, we need to return via syscall_exit to set
	   the SO bit in cr0 and potentially stop for ptrace. */
	b	syscall_exit

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.sys32_swapcontext
	b	80f

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	80f

_GLOBAL(ppc32_sigreturn)
	bl	.sys32_sigreturn
	b	80f

_GLOBAL(ppc32_rt_sigreturn)
	bl	.sys32_rt_sigreturn
	b	80f

_GLOBAL(ppc64_rt_sigreturn)
	bl	.sys_rt_sigreturn

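	/* Common exit for sigreturn/swapcontext: a negative return is
	 * an error and must leave via syscall_exit so the SO bit gets
	 * set; otherwise give a syscall tracer its exit callback, then
	 * take the full exception return path. */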
80:	cmpdi	0,r3,0
	blt	syscall_exit
	clrrdi	r4,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r4)
	andi.	r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq+	81f
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
81:	b	.ret_from_except

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20			/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h		/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE		/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */
	oris	r0,r6,0x0800	/* set C (class) bit */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r6,r6,(SLB_ESID_V)@h
	ori	r6,r6,(SLB_NUM_BOLTED-1)@l
	slbie	r0
	slbie	r0		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r6
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
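	/* r0 now has _TIF_NEED_RESCHED set and _TIF_SIGPENDING set
	 * only if MSR_PR was set, i.e. pending signals are only
	 * considered when returning to user mode. */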
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACA+LPPACAANYINT(r13)
	cmpdi	r3,0
	beq+	4f		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10		/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1
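	/* MSR_RI is now clear: an exception between here and the rfid
	 * would be unrecoverable, which is what makes it safe to load
	 * SRR0 and SRR1 below. */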

	ld	r0,_MSR(r1)
	mtspr	SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,4*cr1+eq,eq
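	/* cr0.eq now means "preempt_count() == 0 and interrupts were
	 * enabled"; in every other case preemption is forbidden and we
	 * just restore registers. */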
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order
	 * half of all registers that it saves.  We therefore save those
	 * registers RTAS might touch to the stack.  (r0, r3-r13 are
	 * caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* It is not acceptable to get here with interrupts enabled;
	 * check for that with the asm equivalent of WARN_ON.
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
.previous
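	/* The __bug_table entry above records the trap address, the
	 * line number (0x1000000 ORed in to mark it as a warning
	 * rather than a BUG) and the file/function strings; tdnei only
	 * traps when r0 is non-zero, i.e. when MSR_EE was still set. */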

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	SET_REG_TO_LABEL(r4,.rtas_return_loc)
	SET_REG_TO_CONST(r9,KERNELBASE)
	sub	r4,r4,r9
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

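	/* r0 = current MSR with EE/SE/BE/RI cleared, loaded below to
	 * quiesce the CPU before the rfid; r6, built next, is the MSR
	 * RTAS runs with: SF/IR/DR/FE0/FE1/FP also cleared (32-bit
	 * real mode, FP off) but RI set. */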
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	SET_REG_TO_LABEL(r4,rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SRR0,r5
	mtspr	SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRG3		/* Get PACA */
	SET_REG_TO_CONST(r5, KERNELBASE)
	sub	r4,r4,r5		/* RELOC the PACA base pointer */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOADADDR(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order
	 * half of all registers that it saves.  We therefore save those
	 * registers PROM might touch to the stack.  (r0, r3-r13 are
	 * caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Scrub the top 32 bits of r1 in case OF corrupted them */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */