blob: 65a82c2645a7bf59923e8b4ed92a26b6b4d7517e [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/config.h>
Sam Ravnborg0013a852005-09-09 20:57:26 +020026#include <asm/asm-offsets.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027
28/* we have the following possibilities to act on an interruption:
29 * - handle in assembly and use shadowed registers only
30 * - save registers to kernel stack and handle in assembly or C */
31
32
Grant Grundler896a3752005-10-21 22:40:07 -040033#include <asm/psw.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <asm/assembly.h> /* for LDREG/STREG defines */
35#include <asm/pgtable.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
/* 32-bit vs 64-bit compare/branch abstraction: wide (__LP64__) kernels need
 * the doubleword '*' condition modifier on cmpib/cmpb and assemble at PA
 * level 2.0 wide; narrow kernels use the plain word forms.  COND(x) wraps a
 * condition name with the '*' doubleword modifier only on wide builds. */
40#ifdef __LP64__
41#define CMPIB cmpib,*
42#define CMPB cmpb,*
43#define COND(x) *x
44
45	.level 2.0w
46#else
47#define CMPIB cmpib,
48#define CMPB cmpb,
49#define COND(x) x
50
51	.level 2.0
52#endif
53
	/* dirty-bit spinlock, defined in the data segment elsewhere */
54	.import pa_dbit_lock,data
55
56	/* space_to_prot macro creates a prot id from a space id */
57
58#if (SPACEID_SHIFT) == 0
	/* No space-id shift: position the space id with a single zeroing deposit. */
59	.macro	space_to_prot spc prot
60	depd,z	\spc,62,31,\prot
61	.endm
62#else
	/* Non-zero SPACEID_SHIFT: extract the relevant space-id field instead. */
63	.macro	space_to_prot spc prot
64	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
65	.endm
66#endif
67
68	/* Switch to virtual mapping, trashing only %r1 */
	/* The switch is performed by faking an interruption return: the
	 * IIA queues are loaded with the address of local label "4" below
	 * and %ipsw with KERNEL_PSW, then rfir "returns" there with
	 * translation enabled.  Kernel space registers sr4-sr7 are zeroed;
	 * the interrupted sr7 is preserved in sr3 (only when non-zero,
	 * i.e. we came from user space) for the return path. */
69	.macro	virt_map
Grant Grundler896a3752005-10-21 22:40:07 -040070	/* pcxt_ssm_bug */
71	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
Linus Torvalds1da177e2005-04-16 15:20:36 -070072	mtsp	%r0, %sr4
73	mtsp	%r0, %sr5
Grant Grundler896a3752005-10-21 22:40:07 -040074	mfsp	%sr7, %r1
75	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
76	mtsp	%r1, %sr3
77	tovirt_r1 %r29
78	load32	KERNEL_PSW, %r1
79
80	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
Linus Torvalds1da177e2005-04-16 15:20:36 -070081	mtsp	%r0, %sr6
82	mtsp	%r0, %sr7
	/* cr17/cr18 are two-deep queues: the first mtctl pushes the tail,
	 * the second the head (see the per-line comments). */
Linus Torvalds1da177e2005-04-16 15:20:36 -070083	mtctl	%r0, %cr17	/* Clear IIASQ tail */
84	mtctl	%r0, %cr17	/* Clear IIASQ head */
Grant Grundler896a3752005-10-21 22:40:07 -040085	mtctl	%r1, %ipsw
Linus Torvalds1da177e2005-04-16 15:20:36 -070086	load32	4f, %r1
87	mtctl	%r1, %cr18	/* Set IIAOQ tail */
88	ldo	4(%r1), %r1
89	mtctl	%r1, %cr18	/* Set IIAOQ head */
90	rfir
91	nop
924:
93	.endm
94
95 /*
96 * The "get_stack" macros are responsible for determining the
97 * kernel stack value.
98 *
99 * For Faults:
100 * If sr7 == 0
101 * Already using a kernel stack, so call the
102 * get_stack_use_r30 macro to push a pt_regs structure
103 * on the stack, and store registers there.
104 * else
105 * Need to set up a kernel stack, so call the
106 * get_stack_use_cr30 macro to set up a pointer
107 * to the pt_regs structure contained within the
108 * task pointer pointed to by cr30. Set the stack
109 * pointer to point to the end of the task structure.
110 *
111 * For Interrupts:
112 * If sr7 == 0
113 * Already using a kernel stack, check to see if r30
114 * is already pointing to the per processor interrupt
115 * stack. If it is, call the get_stack_use_r30 macro
116 * to push a pt_regs structure on the stack, and store
117 * registers there. Otherwise, call get_stack_use_cr31
118 * to get a pointer to the base of the interrupt stack
119 * and push a pt_regs structure on that stack.
120 * else
121 * Need to set up a kernel stack, so call the
122 * get_stack_use_cr30 macro to set up a pointer
123 * to the pt_regs structure contained within the
124 * task pointer pointed to by cr30. Set the stack
125 * pointer to point to the end of the task structure.
126 * N.B: We don't use the interrupt stack for the
127 * first interrupt from userland, because signals/
128 * resched's are processed when returning to userland,
129 * and we can sleep in those cases.
130 *
131 * Note that we use shadowed registers for temps until
132 * we can save %r26 and %r29. %r26 is used to preserve
133 * %r8 (a shadowed register) which temporarily contained
134 * either the fault type ("code") or the eirr. We need
135 * to use a non-shadowed register to carry the value over
136 * the rfir in virt_map. We use %r26 since this value winds
137 * up being passed as the argument to either do_cpu_irq_mask
138 * or handle_interruption. %r29 is used to hold a pointer
139 * the register save area, and once again, it needs to
140 * be a non-shadowed register so that it survives the rfir.
141 *
142 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
143 */
144
	/* Entered from user context: cr30 holds the thread_info pointer.
	 * Save %r30/%r29/%r26 into the task's pt_regs area (addressed
	 * physically via tophys), leave %r29 pointing at that save area,
	 * and point %r30 at the kernel stack just above the thread area. */
145	.macro	get_stack_use_cr30
146
147	/* we save the registers in the task struct */
148
149	mfctl   %cr30, %r1
150	tophys  %r1,%r9
151	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
152	tophys  %r1,%r9
153	ldo     TASK_REGS(%r9),%r9
154	STREG   %r30, PT_GR30(%r9)
155	STREG   %r29,PT_GR29(%r9)
156	STREG   %r26,PT_GR26(%r9)
157	copy    %r9,%r29
158	mfctl   %cr30, %r1
159	ldo	THREAD_SZ_ALGN(%r1), %r30
160	.endm
161
	/* Already on a kernel stack: push a pt_regs frame on it (PT_SZ_ALGN)
	 * and save %r30/%r29/%r26 there; %r29 ends up pointing at the frame. */
162	.macro	get_stack_use_r30
163
164	/* we put a struct pt_regs on the stack and save the registers there */
165
166	tophys  %r30,%r9
167	STREG   %r30,PT_GR30(%r9)
168	ldo	PT_SZ_ALGN(%r30),%r30
169	STREG   %r29,PT_GR29(%r9)
170	STREG   %r26,PT_GR26(%r9)
171	copy    %r9,%r29
172	.endm
173
	/* Inverse of the above: restore %r1, %r30 and (last, since it is the
	 * base pointer being used) %r29 from the pt_regs area. */
174	.macro  rest_stack
175	LDREG   PT_GR1(%r29), %r1
176	LDREG   PT_GR30(%r29),%r30
177	LDREG   PT_GR29(%r29),%r29
178	.endm
179
180	/* default interruption handler
181	 * (calls traps.c:handle_interruption) */
	/* Branch to intr_save with the trap number placed in shadowed %r8
	 * in the branch delay slot; each vector slot is padded to 32 bytes. */
182	.macro	def code
183	b	intr_save
184	ldi     \code, %r8
185	.align	32
186	.endm
187
188	/* Interrupt interruption handler
189	 * (calls irq.c:do_cpu_irq_mask) */
	/* The delay slot captures %sr7 into shadowed %r16 so intr_extint
	 * can tell a user-mode (%sr7 != 0) from a kernel-mode interrupt. */
190	.macro	extint code
191	b	intr_extint
192	mfsp    %sr7,%r16
193	.align	32
194	.endm
195
196	.import	os_hpmc, code
197
198	/* HPMC handler */
	/* High Priority Machine Check vector: firmware requires the
	 * checksum/address/length words below; the leading nop and the
	 * checksum are patched at runtime. */
199	.macro	hpmc code
200	nop			/* must be a NOP, will be patched later */
201	load32	PA(os_hpmc), %r3
202	bv,n	0(%r3)
203	nop
204	.word	0		/* checksum (will be patched) */
205	.word	PA(os_hpmc)	/* address of handler */
206	.word	0		/* length of handler */
207	.endm
208
209 /*
210 * Performance Note: Instructions will be moved up into
211 * this part of the code later on, once we are sure
212 * that the tlb miss handlers are close to final form.
213 */
214
215	/* Register definitions for tlb miss handler macros */
216
217	va  = r8	/* virtual address for which the trap occurred */
218	spc = r24	/* space for which the trap occurred */
219
220#ifndef __LP64__
221
222 /*
223 * itlb miss interruption handler (parisc 1.1 - 32 bit)
224 */
225
226 .macro itlb_11 code
227
228 mfctl %pcsq, spc
229 b itlb_miss_11
230 mfctl %pcoq, va
231
232 .align 32
233 .endm
234#endif
235
236 /*
237 * itlb miss interruption handler (parisc 2.0)
238 */
239
240 .macro itlb_20 code
241 mfctl %pcsq, spc
242#ifdef __LP64__
243 b itlb_miss_20w
244#else
245 b itlb_miss_20
246#endif
247 mfctl %pcoq, va
248
249 .align 32
250 .endm
251
252#ifndef __LP64__
253 /*
254 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
255 *
256 * Note: naitlb misses will be treated
257 * as an ordinary itlb miss for now.
258 * However, note that naitlb misses
259 * have the faulting address in the
260 * IOR/ISR.
261 */
262
263 .macro naitlb_11 code
264
265 mfctl %isr,spc
266 b itlb_miss_11
267 mfctl %ior,va
268 /* FIXME: If user causes a naitlb miss, the priv level may not be in
269 * lower bits of va, where the itlb miss handler is expecting them
270 */
271
272 .align 32
273 .endm
274#endif
275
276 /*
277 * naitlb miss interruption handler (parisc 2.0)
278 *
279 * Note: naitlb misses will be treated
280 * as an ordinary itlb miss for now.
281 * However, note that naitlb misses
282 * have the faulting address in the
283 * IOR/ISR.
284 */
285
286 .macro naitlb_20 code
287
288 mfctl %isr,spc
289#ifdef __LP64__
290 b itlb_miss_20w
291#else
292 b itlb_miss_20
293#endif
294 mfctl %ior,va
295 /* FIXME: If user causes a naitlb miss, the priv level may not be in
296 * lower bits of va, where the itlb miss handler is expecting them
297 */
298
299 .align 32
300 .endm
301
302#ifndef __LP64__
303 /*
304 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
305 */
306
307 .macro dtlb_11 code
308
309 mfctl %isr, spc
310 b dtlb_miss_11
311 mfctl %ior, va
312
313 .align 32
314 .endm
315#endif
316
317 /*
318 * dtlb miss interruption handler (parisc 2.0)
319 */
320
321 .macro dtlb_20 code
322
323 mfctl %isr, spc
324#ifdef __LP64__
325 b dtlb_miss_20w
326#else
327 b dtlb_miss_20
328#endif
329 mfctl %ior, va
330
331 .align 32
332 .endm
333
334#ifndef __LP64__
335 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
336
337 .macro nadtlb_11 code
338
339 mfctl %isr,spc
340 b nadtlb_miss_11
341 mfctl %ior,va
342
343 .align 32
344 .endm
345#endif
346
347 /* nadtlb miss interruption handler (parisc 2.0) */
348
349 .macro nadtlb_20 code
350
351 mfctl %isr,spc
352#ifdef __LP64__
353 b nadtlb_miss_20w
354#else
355 b nadtlb_miss_20
356#endif
357 mfctl %ior,va
358
359 .align 32
360 .endm
361
362#ifndef __LP64__
363 /*
364 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
365 */
366
367 .macro dbit_11 code
368
369 mfctl %isr,spc
370 b dbit_trap_11
371 mfctl %ior,va
372
373 .align 32
374 .endm
375#endif
376
377 /*
378 * dirty bit trap interruption handler (parisc 2.0)
379 */
380
381 .macro dbit_20 code
382
383 mfctl %isr,spc
384#ifdef __LP64__
385 b dbit_trap_20w
386#else
387 b dbit_trap_20
388#endif
389 mfctl %ior,va
390
391 .align 32
392 .endm
393
394 /* The following are simple 32 vs 64 bit instruction
395 * abstractions for the macros */
396 .macro EXTR reg1,start,length,reg2
397#ifdef __LP64__
398 extrd,u \reg1,32+\start,\length,\reg2
399#else
400 extrw,u \reg1,\start,\length,\reg2
401#endif
402 .endm
403
404 .macro DEP reg1,start,length,reg2
405#ifdef __LP64__
406 depd \reg1,32+\start,\length,\reg2
407#else
408 depw \reg1,\start,\length,\reg2
409#endif
410 .endm
411
412 .macro DEPI val,start,length,reg
413#ifdef __LP64__
414 depdi \val,32+\start,\length,\reg
415#else
416 depwi \val,\start,\length,\reg
417#endif
418 .endm
419
420 /* In LP64, the space contains part of the upper 32 bits of the
421 * fault. We have to extract this and place it in the va,
422 * zeroing the corresponding bits in the space register */
423 .macro space_adjust spc,va,tmp
424#ifdef __LP64__
425 extrd,u \spc,63,SPACEID_SHIFT,\tmp
426 depd %r0,63,SPACEID_SHIFT,\spc
427 depd \tmp,31,SPACEID_SHIFT,\va
428#endif
429 .endm
430
431	.import	swapper_pg_dir,code
432
433	/* Get the pgd.  For faults on space zero (kernel space), this
434	 * is simply swapper_pg_dir.  For user space faults, the
435	 * pgd is stored in %cr25 */
	/* The or,COND(=) nullifies the following mfctl when \spc is zero,
	 * so kernel-space faults keep the swapper_pg_dir physical address
	 * just loaded into \reg. */
436	.macro		get_pgd	spc,reg
437	ldil		L%PA(swapper_pg_dir),\reg
438	ldo		R%PA(swapper_pg_dir)(\reg),\reg
439	or,COND(=)	%r0,\spc,%r0
440	mfctl		%cr25,\reg
441	.endm
442
443	/*
444		space_check(spc,tmp,fault)

446		spc - The space we saw the fault with.
447		tmp - The place to store the current space.
448		fault - Function to call on failure.

450		Only allow faults on different spaces from the
451		currently active one if we're the kernel

453	*/
	/* Nullification chain: if \spc != 0 (a user space), \tmp is forced
	 * equal to \spc so the final compare cannot branch; if we are running
	 * as kernel (sr7 == 0 after the copy) the compare is nullified too.
	 * Only a genuine kernel-mode cross-space fault reaches \fault. */
454	.macro		space_check	spc,tmp,fault
455	mfsp		%sr7,\tmp
456	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
457					 * as kernel, so defeat the space
458					 * check if it is */
459	copy		\spc,\tmp
460	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
461	cmpb,COND(<>),n	\tmp,\spc,\fault
462	.endm
463
464	/* Look up a PTE in a 2-Level scheme (faulting at each
465	 * level if the entry isn't present
466	 *
467	 * NOTE: we use ldw even for LP64, since the short pointers
468	 * can address up to 1TB
469	 */
470	.macro	L2_ptep	pmd,pte,index,va,fault
471#if PT_NLEVELS == 3
472	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
473#else
474	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
475#endif
476	DEP     %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	/* preload "not present" so a nullified lookup leaves \pte clear */
477	copy	%r0,\pte
478	ldw,s	\index(\pmd),\pmd
479	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
480	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
481	copy	\pmd,%r9
482#ifdef __LP64__
483	shld	%r9,PxD_VALUE_SHIFT,\pmd
484#else
485	shlw	%r9,PxD_VALUE_SHIFT,\pmd
486#endif
487	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
488	DEP	%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
489	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
490	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
491	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
492	.endm
493
494	/* Look up PTE in a 3-Level scheme.
495	 *
496	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
497	 * first pmd adjacent to the pgd.  This means that we can
498	 * subtract a constant offset to get to it.  The pmd and pgd
499	 * sizes are arranged so that a single pmd covers 4GB (giving
500	 * a full LP64 process access to 8TB) so our lookups are
501	 * effectively L2 for the first 4GB of the kernel (i.e. for
502	 * all ILP32 processes and all the kernel for machines with
503	 * under 4GB of memory) */
	/* Each "extrd,u,*= \va,31,32,%r0" tests the upper 32 bits of the
	 * address and nullifies the following instruction when they are
	 * zero, so for va < 4GB every pgd-level step is skipped; the final
	 * *<> form conversely applies the constant pgd->pmd offset only in
	 * that low-address case (first pmd adjacent to the pgd, see above). */
504	.macro	L3_ptep pgd,pte,index,va,fault
505	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
506	copy	%r0,\pte
507	extrd,u,*=	\va,31,32,%r0
508	ldw,s	\index(\pgd),\pgd
509	extrd,u,*=	\va,31,32,%r0
510	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
511	extrd,u,*=	\va,31,32,%r0
512	shld	\pgd,PxD_VALUE_SHIFT,\index
513	extrd,u,*=	\va,31,32,%r0
514	copy	\index,\pgd
515	extrd,u,*<>	\va,31,32,%r0
516	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
517	L2_ptep	\pgd,\pte,\index,\va,\fault
518	.endm
519
520	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
521	 * don't needlessly dirty the cache line if it was already set */
	/* and,COND(<>) nullifies the STREG when _PAGE_ACCESSED was already
	 * set in \pte, skipping the redundant store. */
522	.macro	update_ptep	ptep,pte,tmp,tmp1
523	ldi	_PAGE_ACCESSED,\tmp1
524	or	\tmp1,\pte,\tmp
525	and,COND(<>)	\tmp1,\pte,%r0
526	STREG	\tmp,0(\ptep)
527	.endm
528
529	/* Set the dirty bit (and accessed bit).  No need to be
530	 * clever, this is only used from the dirty fault */
531	.macro	update_dirty	ptep,pte,tmp
532	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
533	or	\tmp,\pte,\pte
534	STREG	\pte,0(\ptep)
535	.endm
536
537	/* Convert the pte and prot to tlb insertion values.  How
538	 * this happens is quite subtle, read below */
	/* Each extrd,u,*= below tests a single pte bit and nullifies the
	 * following deposit when that bit is clear, so the user/gateway
	 * privilege-level fixups are applied conditionally without branches. */
539	.macro		make_insert_tlb	spc,pte,prot
540	space_to_prot   \spc \prot        /* create prot id from space */
541	/* The following is the real subtlety.  This is depositing
542	 * T <-> _PAGE_REFTRAP
543	 * D <-> _PAGE_DIRTY
544	 * B <-> _PAGE_DMB (memory break)
545	 *
546	 * Then incredible subtlety: The access rights are
547	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
548	 * See 3-14 of the parisc 2.0 manual
549	 *
550	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
551	 * trigger an access rights trap in user space if the user
552	 * tries to read an unreadable page */
553	depd            \pte,8,7,\prot
554
555	/* PAGE_USER indicates the page can be read with user privileges,
556	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
557	 * contains _PAGE_READ */
558	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
559	depdi		7,11,3,\prot
560	/* If we're a gateway page, drop PL2 back to zero for promotion
561	 * to kernel privilege (so we can execute the page as kernel).
562	 * Any privilege promotion page always denies read and write */
563	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
564	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
565
566	/* Get rid of prot bits and convert to page addr for iitlbt */
567
568	depd		%r0,63,PAGE_SHIFT,\pte
569	extrd,u		\pte,56,32,\pte
570	.endm
571
572 /* Identical macro to make_insert_tlb above, except it
573 * makes the tlb entry for the differently formatted pa11
574 * insertion instructions */
575 .macro make_insert_tlb_11 spc,pte,prot
576 zdep \spc,30,15,\prot
577 dep \pte,8,7,\prot
578 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
579 depi 1,12,1,\prot
580 extru,= \pte,_PAGE_USER_BIT,1,%r0
581 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
582 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
583 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
584
585 /* Get rid of prot bits and convert to page addr for iitlba */
586
587 depi 0,31,12,\pte
588 extru \pte,24,25,\pte
589
590 .endm
591
592 /* This is for ILP32 PA2.0 only. The TLB insertion needs
593 * to extend into I/O space if the address is 0xfXXXXXXX
594 * so we extend the f's into the top word of the pte in
595 * this case */
596 .macro f_extend pte,tmp
597 extrd,s \pte,42,4,\tmp
598 addi,<> 1,\tmp,%r0
599 extrd,s \pte,63,25,\pte
600 .endm
601
602	/* The alias region is an 8MB aligned 16MB to do clear and
603	 * copy user pages at addresses congruent with the user
604	 * virtual address.
605	 *
606	 * To use the alias page, you set %r26 up with the to TLB
607	 * entry (identifying the physical page) and %r23 up with
608	 * the from tlb entry (or nothing if only a to entry---for
609	 * clear_user_page_asm) */
	/* Alias faults are kernel-only (\spc must be 0).  The low 23 bits
	 * of \va are masked off and the remainder compared against
	 * TMPALIAS_MAP_START; anything outside the region goes to \fault.
	 * A fixed DIRTY|WRITE|READ prot is then built, and %r23 ("from")
	 * or %r26 ("to") is selected into \pte by nullification: if the
	 * selector bit in \va is clear the extract nullifies the or,tr
	 * and the plain or runs (\pte = %r26); if set, the or,tr runs
	 * (\pte = %r23) and, being always-true, nullifies the plain or. */
610	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
611	cmpib,COND(<>),n 0,\spc,\fault
612	ldil		L%(TMPALIAS_MAP_START),\tmp
613#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
614	/* on LP64, ldi will sign extend into the upper 32 bits,
615	 * which is behaviour we don't want */
616	depdi		0,31,32,\tmp
617#endif
618	copy		\va,\tmp1
619	DEPI		0,31,23,\tmp1
620	cmpb,COND(<>),n	\tmp,\tmp1,\fault
621	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
622	depd,z		\prot,8,7,\prot
623	/*
624	 * OK, it is in the temp alias region, check whether "from" or "to".
625	 * Check "subtle" note in pacache.S re: r23/r26.
626	 */
627#ifdef __LP64__
628	extrd,u,*=	\va,41,1,%r0
629#else
630	extrw,u,=	\va,9,1,%r0
631#endif
632	or,COND(tr)	%r23,%r0,\pte
633	or		%r26,%r0,\pte
634	.endm
635
636
637 /*
638 * Align fault_vector_20 on 4K boundary so that both
639 * fault_vector_11 and fault_vector_20 are on the
640 * same page. This is only necessary as long as we
641 * write protect the kernel text, which we may stop
642 * doing once we use large page translations to cover
643 * the static part of the kernel address space.
644 */
645
646 .export fault_vector_20
647
648 .text
649
650 .align 4096
651
652fault_vector_20:
653 /* First vector is invalid (0) */
654 .ascii "cows can fly"
655 .byte 0
656 .align 32
657
658 hpmc 1
659 def 2
660 def 3
661 extint 4
662 def 5
663 itlb_20 6
664 def 7
665 def 8
666 def 9
667 def 10
668 def 11
669 def 12
670 def 13
671 def 14
672 dtlb_20 15
673#if 0
674 naitlb_20 16
675#else
676 def 16
677#endif
678 nadtlb_20 17
679 def 18
680 def 19
681 dbit_20 20
682 def 21
683 def 22
684 def 23
685 def 24
686 def 25
687 def 26
688 def 27
689 def 28
690 def 29
691 def 30
692 def 31
693
694#ifndef __LP64__
695
696 .export fault_vector_11
697
698 .align 2048
699
700fault_vector_11:
701 /* First vector is invalid (0) */
702 .ascii "cows can fly"
703 .byte 0
704 .align 32
705
706 hpmc 1
707 def 2
708 def 3
709 extint 4
710 def 5
711 itlb_11 6
712 def 7
713 def 8
714 def 9
715 def 10
716 def 11
717 def 12
718 def 13
719 def 14
720 dtlb_11 15
721#if 0
722 naitlb_11 16
723#else
724 def 16
725#endif
726 nadtlb_11 17
727 def 18
728 def 19
729 dbit_11 20
730 def 21
731 def 22
732 def 23
733 def 24
734 def 25
735 def 26
736 def 27
737 def 28
738 def 29
739 def 30
740 def 31
741
742#endif
743
744 .import handle_interruption,code
745 .import do_cpu_irq_mask,code
746
747 /*
748 * r26 = function to be called
749 * r25 = argument to pass in
750 * r24 = flags for do_fork()
751 *
752 * Kernel threads don't ever return, so they don't need
753 * a true register context. We just save away the arguments
754 * for copy_thread/ret_ to properly set up the child.
755 */
756
757#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
758#define CLONE_UNTRACED 0x00800000
759
760 .export __kernel_thread, code
761 .import do_fork
762__kernel_thread:
763 STREG %r2, -RP_OFFSET(%r30)
764
765 copy %r30, %r1
766 ldo PT_SZ_ALGN(%r30),%r30
767#ifdef __LP64__
768 /* Yo, function pointers in wide mode are little structs... -PB */
769 ldd 24(%r26), %r2
770 STREG %r2, PT_GR27(%r1) /* Store childs %dp */
771 ldd 16(%r26), %r26
772
773 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
774 copy %r0, %r22 /* user_tid */
775#endif
776 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
777 STREG %r25, PT_GR25(%r1)
778 ldil L%CLONE_UNTRACED, %r26
779 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
780 or %r26, %r24, %r26 /* will have kernel mappings. */
781 ldi 1, %r25 /* stack_start, signals kernel thread */
782 stw %r0, -52(%r30) /* user_tid */
783#ifdef __LP64__
784 ldo -16(%r30),%r29 /* Reference param save area */
785#endif
786 BL do_fork, %r2
787 copy %r1, %r24 /* pt_regs */
788
789 /* Parent Returns here */
790
791 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
792 ldo -PT_SZ_ALGN(%r30), %r30
793 bv %r0(%r2)
794 nop
795
796 /*
797 * Child Returns here
798 *
799 * copy_thread moved args from temp save area set up above
800 * into task save area.
801 */
802
803 .export ret_from_kernel_thread
804ret_from_kernel_thread:
805
806 /* Call schedule_tail first though */
807 BL schedule_tail, %r2
808 nop
809
810 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
811 LDREG TASK_PT_GR25(%r1), %r26
812#ifdef __LP64__
813 LDREG TASK_PT_GR27(%r1), %r27
814 LDREG TASK_PT_GR22(%r1), %r22
815#endif
816 LDREG TASK_PT_GR26(%r1), %r1
817 ble 0(%sr7, %r1)
818 copy %r31, %r2
819
820#ifdef __LP64__
821 ldo -16(%r30),%r29 /* Reference param save area */
822 loadgp /* Thread could have been in a module */
823#endif
824 b sys_exit
825 ldi 0, %r26
826
827 .import sys_execve, code
828 .export __execve, code
829__execve:
830 copy %r2, %r15
831 copy %r30, %r16
832 ldo PT_SZ_ALGN(%r30), %r30
833 STREG %r26, PT_GR26(%r16)
834 STREG %r25, PT_GR25(%r16)
835 STREG %r24, PT_GR24(%r16)
836#ifdef __LP64__
837 ldo -16(%r30),%r29 /* Reference param save area */
838#endif
839 BL sys_execve, %r2
840 copy %r16, %r26
841
842 cmpib,=,n 0,%r28,intr_return /* forward */
843
844 /* yes, this will trap and die. */
845 copy %r15, %r2
846 copy %r16, %r30
847 bv %r0(%r2)
848 nop
849
850 .align 4
851
852	/*
853	 * struct task_struct *_switch_to(struct task_struct *prev,
854	 *	struct task_struct *next)
855	 *
856	 * switch kernel stacks and return prev */
	/* %r26 = prev, %r25 = next (SysV-style parisc C args).  The resume
	 * point (_switch_to_ret) and kernel SP are stored into prev's
	 * thread via TASK_PT_KPC/KSP, then next's are loaded and we branch
	 * to next's saved KPC; cr30 (thread_info) is switched in the branch
	 * delay slot.  prev is returned in %r28 (copied in the final delay
	 * slot) so the C caller sees it as the return value. */
857	.export	_switch_to, code
858_switch_to:
859	STREG	 %r2, -RP_OFFSET(%r30)
860
861	callee_save
862
863	load32	_switch_to_ret, %r2
864
865	STREG	%r2, TASK_PT_KPC(%r26)
866	LDREG	TASK_PT_KPC(%r25), %r2
867
868	STREG	%r30, TASK_PT_KSP(%r26)
869	LDREG	TASK_PT_KSP(%r25), %r30
870	LDREG	TASK_THREAD_INFO(%r25), %r25
871	bv	%r0(%r2)
872	mtctl   %r25,%cr30
873
	/* A newly-resumed task continues here with next's stack in place. */
874_switch_to_ret:
875	mtctl	%r0, %cr0		/* Needed for single stepping */
876	callee_rest
877
878	LDREG	-RP_OFFSET(%r30), %r2
879	bv	%r0(%r2)
880	copy	%r26, %r28
881
882 /*
883 * Common rfi return path for interruptions, kernel execve, and
884 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
885 * return via this path if the signal was received when the process
886 * was running; if the process was blocked on a syscall then the
887 * normal syscall_exit path is used. All syscalls for traced
888 * processes exit via intr_restore.
889 *
890 * XXX If any syscalls that change a processes space id ever exit
891 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
892 * adjust IASQ[0..1].
893 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 */
895
896 .align 4096
897
898	.export	syscall_exit_rfi
syscall_exit_rfi:
900	mfctl   %cr30,%r16
901	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
902	ldo	TASK_REGS(%r16),%r16
903	/* Force iaoq to userspace, as the user has had access to our current
904	 * context via sigcontext. Also Filter the PSW for the same reason.
905	 */
	/* depi 3,31,2 forces the two low bits of each instruction-offset
	 * queue entry to 3, i.e. privilege level 3 (user). */
906	LDREG	PT_IAOQ0(%r16),%r19
907	depi	3,31,2,%r19
908	STREG	%r19,PT_IAOQ0(%r16)
909	LDREG	PT_IAOQ1(%r16),%r19
910	depi	3,31,2,%r19
911	STREG	%r19,PT_IAOQ1(%r16)
912	LDREG   PT_PSW(%r16),%r19
913	load32	USER_PSW_MASK,%r1
914#ifdef __LP64__
915	load32	USER_PSW_HI_MASK,%r20
916	depd    %r20,31,32,%r1
917#endif
918	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
919	load32	USER_PSW,%r1
920	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
921	STREG   %r19,PT_PSW(%r16)
922
923	/*
924	 * If we aren't being traced, we never saved space registers
925	 * (we don't store them in the sigcontext), so set them
926	 * to "proper" values now (otherwise we'll wind up restoring
927	 * whatever was last stored in the task structure, which might
928	 * be inconsistent if an interrupt occurred while on the gateway
929	 * page).  Note that we may be "trashing" values the user put in
930	 * them, but we don't support the user changing them.
931	 */
932
	/* sr2 cleared for the gateway page; sr0-1 and sr3-7 all get the
	 * user space id currently held in sr3. */
933	STREG   %r0,PT_SR2(%r16)
934	mfsp    %sr3,%r19
935	STREG   %r19,PT_SR0(%r16)
936	STREG   %r19,PT_SR1(%r16)
937	STREG   %r19,PT_SR3(%r16)
938	STREG   %r19,PT_SR4(%r16)
939	STREG   %r19,PT_SR5(%r16)
940	STREG   %r19,PT_SR6(%r16)
941	STREG   %r19,PT_SR7(%r16)
942
943intr_return:
944 /* NOTE: Need to enable interrupts incase we schedule. */
945 ssm PSW_SM_I, %r0
946
947 /* Check for software interrupts */
948
949 .import irq_stat,data
950
951 load32 irq_stat,%r19
952#ifdef CONFIG_SMP
953 mfctl %cr30,%r1
954 ldw TI_CPU(%r1),%r1 /* get cpu # - int */
955 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
956 ** irq_stat[] is defined using ____cacheline_aligned.
957 */
958#ifdef __LP64__
959 shld %r1, 6, %r20
960#else
961 shlw %r1, 5, %r20
962#endif
963 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
964#endif /* CONFIG_SMP */
965
966 LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
967 cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
968
969intr_check_resched:
970
971 /* check for reschedule */
972 mfctl %cr30,%r1
973 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
974 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
975
976intr_check_sig:
977 /* As above */
978 mfctl %cr30,%r1
979 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
980 bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
981
982intr_restore:
983 copy %r16,%r29
984 ldo PT_FR31(%r29),%r1
985 rest_fp %r1
986 rest_general %r29
987
Grant Grundler896a3752005-10-21 22:40:07 -0400988 /* inverse of virt_map */
989 pcxt_ssm_bug
990 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 tophys_r1 %r29
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992
993 /* Restore space id's and special cr's from PT_REGS
Grant Grundler896a3752005-10-21 22:40:07 -0400994 * structure pointed to by r29
995 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 rest_specials %r29
997
Grant Grundler896a3752005-10-21 22:40:07 -0400998 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
999 * It also restores r1 and r30.
1000 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 rest_stack
1002
1003 rfi
1004 nop
1005 nop
1006 nop
1007 nop
1008 nop
1009 nop
1010 nop
1011 nop
1012
1013 .import do_softirq,code
1014intr_do_softirq:
1015 bl do_softirq,%r2
1016#ifdef __LP64__
1017 ldo -16(%r30),%r29 /* Reference param save area */
1018#else
1019 nop
1020#endif
1021 b intr_check_resched
1022 nop
1023
1024 .import schedule,code
1025intr_do_resched:
1026 /* Only do reschedule if we are returning to user space */
1027 LDREG PT_IASQ0(%r16), %r20
1028 CMPIB= 0,%r20,intr_restore /* backward */
1029 nop
1030 LDREG PT_IASQ1(%r16), %r20
1031 CMPIB= 0,%r20,intr_restore /* backward */
1032 nop
1033
1034#ifdef __LP64__
1035 ldo -16(%r30),%r29 /* Reference param save area */
1036#endif
1037
1038 ldil L%intr_check_sig, %r2
1039 b schedule
1040 ldo R%intr_check_sig(%r2), %r2
1041
1042
1043 .import do_signal,code
1044intr_do_signal:
1045 /*
1046 This check is critical to having LWS
1047 working. The IASQ is zero on the gateway
1048 page and we cannot deliver any signals until
1049 we get off the gateway page.
1050
1051 Only do signals if we are returning to user space
1052 */
1053 LDREG PT_IASQ0(%r16), %r20
1054 CMPIB= 0,%r20,intr_restore /* backward */
1055 nop
1056 LDREG PT_IASQ1(%r16), %r20
1057 CMPIB= 0,%r20,intr_restore /* backward */
1058 nop
1059
1060 copy %r0, %r24 /* unsigned long in_syscall */
1061 copy %r16, %r25 /* struct pt_regs *regs */
1062#ifdef __LP64__
1063 ldo -16(%r30),%r29 /* Reference param save area */
1064#endif
1065
1066 BL do_signal,%r2
1067 copy %r0, %r26 /* sigset_t *oldset = NULL */
1068
1069 b intr_check_sig
1070 nop
1071
1072 /*
1073 * External interrupts.
1074 */
1075
1076intr_extint:
1077 CMPIB=,n 0,%r16,1f
1078 get_stack_use_cr30
1079 b,n 3f
1080
10811:
1082#if 0 /* Interrupt Stack support not working yet! */
1083 mfctl %cr31,%r1
1084 copy %r30,%r17
1085 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
1086#ifdef __LP64__
1087 depdi 0,63,15,%r17
1088#else
1089 depi 0,31,15,%r17
1090#endif
1091 CMPB=,n %r1,%r17,2f
1092 get_stack_use_cr31
1093 b,n 3f
1094#endif
10952:
1096 get_stack_use_r30
1097
10983:
1099 save_specials %r29
1100 virt_map
1101 save_general %r29
1102
1103 ldo PT_FR0(%r29), %r24
1104 save_fp %r24
1105
1106 loadgp
1107
1108 copy %r29, %r26 /* arg0 is pt_regs */
1109 copy %r29, %r16 /* save pt_regs */
1110
1111 ldil L%intr_return, %r2
1112
1113#ifdef __LP64__
1114 ldo -16(%r30),%r29 /* Reference param save area */
1115#endif
1116
1117 b do_cpu_irq_mask
1118 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1119
1120
1121 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1122
1123	.export	intr_save, code /* for os_hpmc */
1124
/*
 * intr_save: common register-save path for interruptions handled in C.
 *
 * Entry: %r8 = interruption code (loaded by the fault stubs, e.g.
 *        dtlb_fault/itlb_fault below); shadowed registers still hold
 *        interrupted state.  %r8 is copied to %r26 (arg0).
 * Picks a stack: if %sr7 != 0 we came from user space and fetch the
 * kernel stack via cr30; if %sr7 == 0 we were already in the kernel
 * and keep using %r30.  Saves specials, generals and FP state into
 * pt_regs (%r29), then branches to handle_interruption(code, regs)
 * with intr_check_sig set up in %r2 as its return point.
 */
1125intr_save:
1126	mfsp	%sr7,%r16
1127	CMPIB=,n 0,%r16,1f		/* sr7 == 0: already on kernel stack */
1128	get_stack_use_cr30
1129	b	2f
1130	copy	%r8,%r26
1131
11321:
1133	get_stack_use_r30
1134	copy	%r8,%r26
1135
11362:
1137	save_specials	%r29
1138
1139	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1140
1141	/*
1142	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1143	 *	     traps.c.
1144	 *	  2) Once we start executing code above 4 Gb, we need
1145	 *	     to adjust iasq/iaoq here in the same way we
1146	 *	     adjust isr/ior below.
1147	 */
1148
1149	CMPIB=,n        6,%r26,skip_save_ior	/* code 6 == itlb miss */
1150
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
	1152	mfctl           %cr20, %r16 /* isr */
Grant Grundler896a3752005-10-21 22:40:07 -04001153	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154	mfctl           %cr21, %r17 /* ior */
	1155
Grant Grundler896a3752005-10-21 22:40:07 -04001156
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157#ifdef __LP64__
	1158	/*
	1159	 * If the interrupted code was running with W bit off (32 bit),
	1160	 * clear the b bits (bits 0 & 1) in the ior.
Grant Grundler896a3752005-10-21 22:40:07 -04001161	 * save_specials left ipsw value in r8 for us to test.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162	 */
	1163	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	1164	depdi		0,1,2,%r17
	1165
	1166	/*
	1167	 * FIXME: This code has hardwired assumptions about the split
	1168	 *	  between space bits and offset bits. This will change
	1169	 *	  when we allow alternate page sizes.
	1170	 */
	1171
	1172	/* adjust isr/ior. */
	1173
	1174	extrd,u		%r16,63,7,%r1	 /* get high bits from isr for ior */
	1175	depd		%r1,31,7,%r17	 /* deposit them into ior */
	1176	depdi		0,63,7,%r16	 /* clear them from isr */
	1177#endif
	1178	STREG           %r16, PT_ISR(%r29)
	1179	STREG           %r17, PT_IOR(%r29)
	1180
	1181
	1182skip_save_ior:
	1183	virt_map
	1184	save_general	%r29
	1185
	1186	ldo	PT_FR0(%r29), %r25
	1187	save_fp	%r25
	1188	
	1189	loadgp
	1190
	1191	copy	%r29, %r25	/* arg1 is pt_regs */
	1192#ifdef __LP64__
	1193	ldo	-16(%r30),%r29	/* Reference param save area */
	1194#endif
	1195
	/* Arrange for handle_interruption to "return" into intr_check_sig */
	1196	ldil	L%intr_check_sig, %r2
	1197	copy	%r25, %r16	/* save pt_regs */
	1198
	1199	b	handle_interruption
	1200	ldo	R%intr_check_sig(%r2), %r2
	1201
1201
1202
	1203	/*
	1204	 * Note for all tlb miss handlers:
	1205	 *
	1206	 * cr24 contains a pointer to the kernel address space
	1207	 * page directory.
	1208	 *
	1209	 * cr25 contains a pointer to the current user address
	1210	 * space page directory.
	1211	 *
	1212	 * sr3 will contain the space id of the user address space
	1213	 * of the current running thread while that thread is
	1214	 * running in the kernel.
	1215	 */
	1216
	1217	/*
	1218	 * register number allocations.  Note that these are all
	1219	 * in the shadowed registers
	1220	 */
	1221
	1222	t0 = r1		/* temporary register 0 */
	1223	va = r8		/* virtual address for which the trap occurred */
	1224	t1 = r9		/* temporary register 1 */
	1225	pte  = r16	/* pte/phys page # */
	1226	prot = r17	/* prot bits */
	1227	spc  = r24	/* space for which the trap occurred */
	1228	ptp = r25	/* page directory/page table pointer */
	1229
	1230#ifdef __LP64__
	1231
/*
 * Data TLB miss, wide kernel (3-level page tables).  Walk the page
 * table; on success mark the pte accessed and insert it with idtlbt,
 * then rfir back to the faulting instruction.  A miss in the page
 * table falls through to the alias check or the slow dtlb_fault path.
 */
	1232dtlb_miss_20w:
	1233	space_adjust	spc,va,t0
	1234	get_pgd		spc,ptp
	1235	space_check	spc,t0,dtlb_fault
	1236
	1237	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
	1238
	1239	update_ptep	ptp,pte,t0,t1
	1240
	1241	make_insert_tlb	spc,pte,prot
	1242	
	1243	idtlbt          pte,prot
	1244
	1245	rfir
	1246	nop
	1247
	1248dtlb_check_alias_20w:
	1249	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
	1250
	1251	idtlbt          pte,prot
	1252
	1253	rfir
	1254	nop
	1255
/*
 * Non-access data TLB miss, wide kernel.  Same walk as above, but a
 * pte with only the FLUSH bit set gets a "flush only" translation
 * inserted instead; an absent translation goes to nadtlb_emulate.
 */
	1256nadtlb_miss_20w:
	1257	space_adjust	spc,va,t0
	1258	get_pgd		spc,ptp
	1259	space_check	spc,t0,nadtlb_fault
	1260
	1261	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w
	1262
	1263	update_ptep	ptp,pte,t0,t1
	1264
	1265	make_insert_tlb	spc,pte,prot
	1266
	1267	idtlbt          pte,prot
	1268
	1269	rfir
	1270	nop
	1271
	1272nadtlb_check_flush_20w:
	1273	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate
	1274
	1275	/* Insert a "flush only" translation */
	1276
	1277	depdi,z         7,7,3,prot
	1278	depdi           1,10,1,prot
	1279
	1280	/* Get rid of prot bits and convert to page addr for idtlbt */
	1281
	1282	depdi		0,63,12,pte
	1283	extrd,u         pte,56,52,pte
	1284	idtlbt          pte,prot
	1285
	1286	rfir
	1287	nop
	1288
	1289#else
	1290
/*
 * Data TLB miss, PA 1.1 (2-level page tables, split I/D TLB).  The
 * insert needs a space register, so sr1 is saved/restored around the
 * idtlba/idtlbp pair.
 */
	1291dtlb_miss_11:
	1292	get_pgd		spc,ptp
	1293
	1294	space_check	spc,t0,dtlb_fault
	1295
	1296	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
	1297
	1298	update_ptep	ptp,pte,t0,t1
	1299
	1300	make_insert_tlb_11	spc,pte,prot
	1301
	1302	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	1303	mtsp		spc,%sr1
	1304
	1305	idtlba		pte,(%sr1,va)
	1306	idtlbp		prot,(%sr1,va)
	1307
	1308	mtsp		t0, %sr1	/* Restore sr1 */
	1309
	1310	rfir
	1311	nop
	1312
	1313dtlb_check_alias_11:
	1314
	1315	/* Check to see if fault is in the temporary alias region */
	1316
	1317	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	1318	ldil            L%(TMPALIAS_MAP_START),t0
	1319	copy            va,t1
	1320	depwi           0,31,23,t1
	1321	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	1322	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	1323	depw,z          prot,8,7,prot
	1324
	1325	/*
	1326	 * OK, it is in the temp alias region, check whether "from" or "to".
	1326	 * Check "subtle" note in pacache.S re: r23/r26.
	1328	 */
	1329
	1330	extrw,u,=       va,9,1,r0
	1331	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	1332	or              %r26,%r0,pte    /* else "to", use "to" page */
	1333
	1334	idtlba          pte,(va)
	1335	idtlbp          prot,(va)
	1336
	1337	rfir
	1338	nop
	1339
/* Non-access data TLB miss, PA 1.1 — see nadtlb_miss_20w for the idea. */
	1340nadtlb_miss_11:
	1341	get_pgd		spc,ptp
	1342
	1343	space_check	spc,t0,nadtlb_fault
	1344
	1345	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11
	1346
	1347	update_ptep	ptp,pte,t0,t1
	1348
	1349	make_insert_tlb_11	spc,pte,prot
	1350
	1351
	1352	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	1353	mtsp		spc,%sr1
	1354
	1355	idtlba		pte,(%sr1,va)
	1356	idtlbp		prot,(%sr1,va)
	1357
	1358	mtsp		t0, %sr1	/* Restore sr1 */
	1359
	1360	rfir
	1361	nop
	1362
	1363nadtlb_check_flush_11:
	1364	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate
	1365
	1366	/* Insert a "flush only" translation */
	1367
	1368	zdepi           7,7,3,prot
	1369	depi            1,10,1,prot
	1370
	1371	/* Get rid of prot bits and convert to page addr for idtlba */
	1372
	1373	depi		0,31,12,pte
	1374	extru		pte,24,25,pte
	1375
	1376	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	1377	mtsp		spc,%sr1
	1378
	1379	idtlba		pte,(%sr1,va)
	1380	idtlbp		prot,(%sr1,va)
	1381
	1382	mtsp		t0, %sr1	/* Restore sr1 */
	1383
	1384	rfir
	1385	nop
	1386
/*
 * Data TLB miss, PA 2.0 narrow kernel (2-level tables, combined TLB).
 * f_extend widens the 32-bit pte before the idtlbt insert.
 */
	1387dtlb_miss_20:
	1388	space_adjust	spc,va,t0
	1389	get_pgd		spc,ptp
	1390	space_check	spc,t0,dtlb_fault
	1391
	1392	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
	1393
	1394	update_ptep	ptp,pte,t0,t1
	1395
	1396	make_insert_tlb	spc,pte,prot
	1397
	1398	f_extend	pte,t0
	1399	
	1400	idtlbt          pte,prot
	1401
	1402	rfir
	1403	nop
	1404
	1405dtlb_check_alias_20:
	1406	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
	1407	
	1408	idtlbt          pte,prot
	1409
	1410	rfir
	1411	nop
	1412
/* Non-access data TLB miss, PA 2.0 narrow — see nadtlb_miss_20w. */
	1413nadtlb_miss_20:
	1414	get_pgd		spc,ptp
	1415
	1416	space_check	spc,t0,nadtlb_fault
	1417
	1418	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20
	1419
	1420	update_ptep	ptp,pte,t0,t1
	1421
	1422	make_insert_tlb	spc,pte,prot
	1423
	1424	f_extend	pte,t0
	1425	
	1426        idtlbt          pte,prot
	1427
	1428	rfir
	1429	nop
	1430
	1431nadtlb_check_flush_20:
	1432	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate
	1433
	1434	/* Insert a "flush only" translation */
	1435
	1436	depdi,z         7,7,3,prot
	1437	depdi           1,10,1,prot
	1438
	1439	/* Get rid of prot bits and convert to page addr for idtlbt */
	1440
	1441	depdi		0,63,12,pte
	1442	extrd,u         pte,56,32,pte
	1443	idtlbt          pte,prot
	1444
	1445	rfir
	1446	nop
	1447#endif
	1448
1448
	1449nadtlb_emulate:
	1450
	1451	/*
	1452	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	1453	 * probei instructions. We don't want to fault for these
	1454	 * instructions (not only does it not make sense, it can cause
	1455	 * deadlocks, since some flushes are done with the mmap
	1456	 * semaphore held). If the translation doesn't exist, we can't
	1457	 * insert a translation, so have to emulate the side effects
	1458	 * of the instruction. Since we don't insert a translation
	1459	 * we can get a lot of faults during a flush loop, so it makes
	1460	 * sense to try to do it here with minimum overhead. We only
	1461	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	1462	 * and index registers are not shadowed. We defer everything
	1463	 * else to the "slow" path.
	1464	 */
	1465
	1466	mfctl           %cr19,%r9 /* Get iir */
	1467
	1468	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	1469	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
	1470
	1471	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	1472	ldi             0x280,%r16
	1473	and             %r9,%r16,%r17
	1474	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	1475	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	1476	BL		get_register,%r25
	1477	extrw,u         %r9,15,5,%r8           /* Get index register # */
	1478	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	1479	copy            %r1,%r24
	1480	BL		get_register,%r25
	1481	extrw,u         %r9,10,5,%r8           /* Get base register # */
	1482	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	1483	BL		set_register,%r25
	1484	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */
	1485
	/* Set PSW_N in the saved ipsw so the faulting insn is nullified
	 * (skipped) when rfir resumes execution. */
	1486nadtlb_nullify:
Grant Grundler896a3752005-10-21 22:40:07 -04001487	mfctl           %ipsw,%r8
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488	ldil            L%PSW_N,%r9
	1489	or              %r8,%r9,%r8            /* Set PSW_N */
Grant Grundler896a3752005-10-21 22:40:07 -04001490	mtctl           %r8,%ipsw
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491
	1492	rfir
	1493	nop
	1494
	1495	/*
		When there is no translation for the probe address then we
	1497	must nullify the insn and return zero in the target register.
	1498	This will indicate to the calling code that it does not have
	1499	write/read privileges to this address.
	1500
	1501	This should technically work for prober and probew in PA 1.1,
	1502	and also probe,r and probe,w in PA 2.0
	1503
	1504	WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
	1505	THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
	1506
	1507	*/
	1508nadtlb_probe_check:
	1509	ldi             0x80,%r16
	1510	and             %r9,%r16,%r17
	1511	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	1512	BL              get_register,%r25      /* Find the target register */
	1513	extrw,u         %r9,31,5,%r8           /* Get target register */
	1514	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	1515	BL		set_register,%r25
	1516	copy            %r0,%r1                /* Write zero to target register */
	1517	b nadtlb_nullify                       /* Nullify return insn */
	1518	nop
	1519
1519
1520
	1521#ifdef __LP64__
/*
 * Instruction TLB miss handlers.  Same page-table walk as the data
 * side, but there is no alias/flush-only special case: a failed walk
 * goes straight to itlb_fault.
 */
	1522itlb_miss_20w:
	1523
	1524	/*
	1525	 * I miss is a little different, since we allow users to fault
	1526	 * on the gateway page which is in the kernel address space.
	1527	 */
	1528
	1529	space_adjust	spc,va,t0
	1530	get_pgd		spc,ptp
	1531	space_check	spc,t0,itlb_fault
	1532
	1533	L3_ptep		ptp,pte,t0,va,itlb_fault
	1534
	1535	update_ptep	ptp,pte,t0,t1
	1536
	1537	make_insert_tlb	spc,pte,prot
	1538	
	1539	iitlbt          pte,prot
	1540
	1541	rfir
	1542	nop
	1543
	1544#else
	1545
/* Instruction TLB miss, PA 1.1 — sr1 saved around iitlba/iitlbp. */
	1546itlb_miss_11:
	1547	get_pgd		spc,ptp
	1548
	1549	space_check	spc,t0,itlb_fault
	1550
	1551	L2_ptep		ptp,pte,t0,va,itlb_fault
	1552
	1553	update_ptep	ptp,pte,t0,t1
	1554
	1555	make_insert_tlb_11	spc,pte,prot
	1556
	1557	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	1558	mtsp		spc,%sr1
	1559
	1560	iitlba		pte,(%sr1,va)
	1561	iitlbp		prot,(%sr1,va)
	1562
	1563	mtsp		t0, %sr1	/* Restore sr1 */
	1564
	1565	rfir
	1566	nop
	1567
/* Instruction TLB miss, PA 2.0 narrow — pte widened via f_extend. */
	1568itlb_miss_20:
	1569	get_pgd		spc,ptp
	1570
	1571	space_check	spc,t0,itlb_fault
	1572
	1573	L2_ptep		ptp,pte,t0,va,itlb_fault
	1574
	1575	update_ptep	ptp,pte,t0,t1
	1576
	1577	make_insert_tlb	spc,pte,prot
	1578
	1579	f_extend	pte,t0	
	1580
	1581	iitlbt          pte,prot
	1582
	1583	rfir
	1584	nop
	1585
	1586#endif
	1587
1587
	1588#ifdef __LP64__
	1589
/*
 * TLB dirty-bit traps: the first write to a clean page lands here.
 * Mark the pte dirty (update_dirty) and re-insert the translation.
 * On SMP, pa_dbit_lock is taken with a ldcw spin loop — but only for
 * user space (spc != 0); the kernel-space path skips the lock.
 * Order matters: the lock is released only after the idtlbt insert.
 */
	1590dbit_trap_20w:
	1591	space_adjust	spc,va,t0
	1592	get_pgd		spc,ptp
	1593	space_check	spc,t0,dbit_fault
	1594
	1595	L3_ptep		ptp,pte,t0,va,dbit_fault
	1596
	1597#ifdef CONFIG_SMP
	1598	CMPIB=,n        0,spc,dbit_nolock_20w
	1599	load32		PA(pa_dbit_lock),t0
	1600
	1601dbit_spin_20w:
	1602	ldcw            0(t0),t1
	1603	cmpib,=         0,t1,dbit_spin_20w
	1604	nop
	1605
	1606dbit_nolock_20w:
	1607#endif
	1608	update_dirty	ptp,pte,t1
	1609
	1610	make_insert_tlb	spc,pte,prot
	1611		
	1612	idtlbt          pte,prot
	1613#ifdef CONFIG_SMP
	1614	CMPIB=,n        0,spc,dbit_nounlock_20w
	1615	ldi             1,t1
	1616	stw             t1,0(t0)
	1617
	1618dbit_nounlock_20w:
	1619#endif
	1620
	1621	rfir
	1622	nop
	1623#else
	1624
/* Dirty-bit trap, PA 1.1 — same protocol; sr1 saved around inserts. */
	1625dbit_trap_11:
	1626
	1627	get_pgd		spc,ptp
	1628
	1629	space_check	spc,t0,dbit_fault
	1630
	1631	L2_ptep		ptp,pte,t0,va,dbit_fault
	1632
	1633#ifdef CONFIG_SMP
	1634	CMPIB=,n        0,spc,dbit_nolock_11
	1635	load32		PA(pa_dbit_lock),t0
	1636
	1637dbit_spin_11:
	1638	ldcw            0(t0),t1
	1639	cmpib,=         0,t1,dbit_spin_11
	1640	nop
	1641
	1642dbit_nolock_11:
	1643#endif
	1644	update_dirty	ptp,pte,t1
	1645
	1646	make_insert_tlb_11	spc,pte,prot
	1647
	1648	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	1649	mtsp		spc,%sr1
	1650
	1651	idtlba		pte,(%sr1,va)
	1652	idtlbp		prot,(%sr1,va)
	1653
	1654	mtsp            t1, %sr1	/* Restore sr1 */
	1655#ifdef CONFIG_SMP
	1656	CMPIB=,n        0,spc,dbit_nounlock_11
	1657	ldi             1,t1
	1658	stw             t1,0(t0)
	1659
	1660dbit_nounlock_11:
	1661#endif
	1662
	1663	rfir
	1664	nop
	1665
/* Dirty-bit trap, PA 2.0 narrow — pte widened via f_extend. */
	1666dbit_trap_20:
	1667	get_pgd		spc,ptp
	1668
	1669	space_check	spc,t0,dbit_fault
	1670
	1671	L2_ptep		ptp,pte,t0,va,dbit_fault
	1672
	1673#ifdef CONFIG_SMP
	1674	CMPIB=,n        0,spc,dbit_nolock_20
	1675	load32		PA(pa_dbit_lock),t0
	1676
	1677dbit_spin_20:
	1678	ldcw            0(t0),t1
	1679	cmpib,=         0,t1,dbit_spin_20
	1680	nop
	1681
	1682dbit_nolock_20:
	1683#endif
	1684	update_dirty	ptp,pte,t1
	1685
	1686	make_insert_tlb	spc,pte,prot
	1687
	1688	f_extend	pte,t1
	1689	
	1690        idtlbt          pte,prot
	1691
	1692#ifdef CONFIG_SMP
	1693	CMPIB=,n        0,spc,dbit_nounlock_20
	1694	ldi             1,t1
	1695	stw             t1,0(t0)
	1696
	1697dbit_nounlock_20:
	1698#endif
	1699
	1700	rfir
	1701	nop
	1702#endif
	1703
1703
	1704	.import handle_interruption,code
	1705
/*
 * Slow-path fault stubs: each loads the interruption code into %r8
 * (in the branch delay slot) and enters intr_save, which saves state
 * and dispatches to handle_interruption() in C.
 */
	1706kernel_bad_space:
	1707	b               intr_save
	1708	ldi             31,%r8  /* Use an unused code */
	1709
	1710dbit_fault:
	1711	b               intr_save
	1712	ldi             20,%r8
	1713
	1714itlb_fault:
	1715	b               intr_save
	1716	ldi             6,%r8	
	1717
	1718nadtlb_fault:
	1719	b               intr_save
	1720	ldi             17,%r8
	1721
	1722dtlb_fault:
	1723	b               intr_save
	1724	ldi             15,%r8
	1725
1725
1726 /* Register saving semantics for system calls:
1727
1728 %r1 clobbered by system call macro in userspace
1729 %r2 saved in PT_REGS by gateway page
1730 %r3 - %r18 preserved by C code (saved by signal code)
1731 %r19 - %r20 saved in PT_REGS by gateway page
1732 %r21 - %r22 non-standard syscall args
1733 stored in kernel stack by gateway page
1734 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1735 %r27 - %r30 saved in PT_REGS by gateway page
1736 %r31 syscall return pointer
1737 */
1738
1739 /* Floating point registers (FIXME: what do we do with these?)
1740
1741 %fr0 - %fr3 status/exception, not preserved
1742 %fr4 - %fr7 arguments
1743 %fr8 - %fr11 not preserved by C code
1744 %fr12 - %fr21 preserved by C code
1745 %fr22 - %fr31 not preserved by C code
1746 */
1747
	/* Save callee-saved registers %r3-%r18 into the pt_regs pointed
	 * to by \regs (used around C calls that may sleep or signal). */
	1748	.macro	reg_save regs
	1749	STREG	%r3, PT_GR3(\regs)
	1750	STREG	%r4, PT_GR4(\regs)
	1751	STREG	%r5, PT_GR5(\regs)
	1752	STREG	%r6, PT_GR6(\regs)
	1753	STREG	%r7, PT_GR7(\regs)
	1754	STREG	%r8, PT_GR8(\regs)
	1755	STREG	%r9, PT_GR9(\regs)
	1756	STREG   %r10,PT_GR10(\regs)
	1757	STREG   %r11,PT_GR11(\regs)
	1758	STREG   %r12,PT_GR12(\regs)
	1759	STREG   %r13,PT_GR13(\regs)
	1760	STREG   %r14,PT_GR14(\regs)
	1761	STREG   %r15,PT_GR15(\regs)
	1762	STREG   %r16,PT_GR16(\regs)
	1763	STREG   %r17,PT_GR17(\regs)
	1764	STREG   %r18,PT_GR18(\regs)
	1765	.endm
	1766
1766
	/* Restore callee-saved registers %r3-%r18 from the pt_regs
	 * pointed to by \regs (inverse of reg_save). */
	1767	.macro	reg_restore regs
	1768	LDREG	PT_GR3(\regs), %r3
	1769	LDREG	PT_GR4(\regs), %r4
	1770	LDREG	PT_GR5(\regs), %r5
	1771	LDREG	PT_GR6(\regs), %r6
	1772	LDREG	PT_GR7(\regs), %r7
	1773	LDREG	PT_GR8(\regs), %r8
	1774	LDREG	PT_GR9(\regs), %r9
	1775	LDREG   PT_GR10(\regs),%r10
	1776	LDREG   PT_GR11(\regs),%r11
	1777	LDREG   PT_GR12(\regs),%r12
	1778	LDREG   PT_GR13(\regs),%r13
	1779	LDREG   PT_GR14(\regs),%r14
	1780	LDREG   PT_GR15(\regs),%r15
	1781	LDREG   PT_GR16(\regs),%r16
	1782	LDREG   PT_GR17(\regs),%r17
	1783	LDREG   PT_GR18(\regs),%r18
	1784	.endm
	1785
1785
	1786	.export sys_fork_wrapper
	1787	.export child_return
/*
 * fork() is implemented via sys_clone(SIGCHLD, usp, regs).  Callee-
 * saved regs and cr27 are stashed in pt_regs so the child can be
 * resumed from them; %r2 is kept in PT_GR19 for the child's return
 * path.  Both parent (wrapper_exit) and child (child_return ->
 * wrapper_exit) leave through the same restore sequence.
 */
	1788sys_fork_wrapper:
	1789	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	1790	ldo	TASK_REGS(%r1),%r1
	1791	reg_save %r1
	1792	mfctl	%cr27, %r3
	1793	STREG	%r3, PT_CR27(%r1)
	1794
	1795	STREG	%r2,-RP_OFFSET(%r30)
	1796	ldo	FRAME_SIZE(%r30),%r30
	1797#ifdef __LP64__
	1798	ldo	-16(%r30),%r29		/* Reference param save area */
	1799#endif
	1800
	1801	/* These are call-clobbered registers and therefore
	1802	   also syscall-clobbered (we hope). */
	1803	STREG	%r2,PT_GR19(%r1)	/* save for child */
	1804	STREG	%r30,PT_GR21(%r1)
	1805
	1806	LDREG	PT_GR30(%r1),%r25
	1807	copy	%r1,%r24
	1808	BL	sys_clone,%r2
	1809	ldi	SIGCHLD,%r26
	1810
	1811	LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
	1812wrapper_exit:
	1813	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	1814	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	1815	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */
	1816
	1817	LDREG	PT_CR27(%r1), %r3
	1818	mtctl	%r3, %cr27
	1819	reg_restore %r1
	1820
	1821	/* strace expects syscall # to be preserved in r20 */
	1822	ldi	__NR_fork,%r20
	1823	bv %r0(%r2)
	1824	STREG	%r20,PT_GR20(%r1)
	1825
	1826	/* Set the return value for the child */
	1827child_return:
	1828	BL	schedule_tail, %r2
	1829	nop
	1830
	1831	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	1832	LDREG	TASK_PT_GR19(%r1),%r2
	1833	b	wrapper_exit
	1834	copy	%r0,%r28	/* child returns 0 from fork/clone/vfork */
	1835
1835
1836
	1837	.export sys_clone_wrapper
/*
 * clone() wrapper: like sys_fork_wrapper but the user's own arguments
 * are already in place; only pt_regs (arg3, %r24) is supplied here.
 */
	1838sys_clone_wrapper:
	1839	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	1840	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	1841	reg_save %r1
	1842	mfctl	%cr27, %r3
	1843	STREG	%r3, PT_CR27(%r1)
	1844
	1845	STREG	%r2,-RP_OFFSET(%r30)
	1846	ldo	FRAME_SIZE(%r30),%r30
	1847#ifdef __LP64__
	1848	ldo	-16(%r30),%r29		/* Reference param save area */
	1849#endif
	1850
	1851	STREG	%r2,PT_GR19(%r1)	/* save for child */
	1852	STREG	%r30,PT_GR21(%r1)
	1853	BL	sys_clone,%r2
	1854	copy	%r1,%r24		/* arg3 = pt_regs */
	1855
	1856	b	wrapper_exit
	1857	LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
	1858
1858
	1859	.export sys_vfork_wrapper
/*
 * vfork() wrapper: saves callee-saved state and cr27 like the fork
 * wrapper, then calls sys_vfork(regs); exits via wrapper_exit.
 */
	1860sys_vfork_wrapper:
	1861	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	1862	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	1863	reg_save %r1
	1864	mfctl	%cr27, %r3
	1865	STREG	%r3, PT_CR27(%r1)
	1866
	1867	STREG	%r2,-RP_OFFSET(%r30)
	1868	ldo	FRAME_SIZE(%r30),%r30
	1869#ifdef __LP64__
	1870	ldo	-16(%r30),%r29		/* Reference param save area */
	1871#endif
	1872
	1873	STREG	%r2,PT_GR19(%r1)	/* save for child */
	1874	STREG	%r30,PT_GR21(%r1)
	1875
	1876	BL	sys_vfork,%r2
	1877	copy	%r1,%r26		/* arg0 = pt_regs */
	1878
	1879	b	wrapper_exit
	1880	LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
	1881
1881
1882
	/* Shared execve wrapper: calls \execve(regs, ...) and, on success
	 * (%r28 not in the error range), returns via %r19 so the new
	 * program's register state from pt_regs takes effect. */
	1883	.macro  execve_wrapper execve
	1884	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	1885	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	1886
	1887	/*
	1888	 * Do we need to save/restore r3-r18 here?
	1889	 * I don't think so. why would new thread need old
	1890	 * thread's registers?
	1891	 */
	1892
	1893	/* %arg0 - %arg3 are already saved for us. */
	1894
	1895	STREG %r2,-RP_OFFSET(%r30)
	1896	ldo FRAME_SIZE(%r30),%r30
	1897#ifdef __LP64__
	1898	ldo	-16(%r30),%r29		/* Reference param save area */
	1899#endif
	1900	bl \execve,%r2
	1901	copy %r1,%arg0
	1902
	1903	ldo -FRAME_SIZE(%r30),%r30
	1904	LDREG -RP_OFFSET(%r30),%r2
	1905
	1906	/* If exec succeeded we need to load the args */
	1907
	1908	ldo -1024(%r0),%r1		/* %r1 = -1024: start of errno range */
	1909	cmpb,>>= %r28,%r1,error_\execve
	1910	copy %r2,%r19
	1911
	1912error_\execve:
	1913	bv %r0(%r19)
	1914	nop
	1915	.endm
	1916
	1917	.export sys_execve_wrapper
	1918	.import sys_execve
	1919
	1920sys_execve_wrapper:
	1921	execve_wrapper sys_execve
	1922
	1923#ifdef __LP64__
	1924	.export sys32_execve_wrapper
	1925	.import sys32_execve
	1926
	1927sys32_execve_wrapper:
	1928	execve_wrapper sys32_execve
	1929#endif
	1930
1930
	1931	.export sys_rt_sigreturn_wrapper
/*
 * rt_sigreturn wrapper: calls sys_rt_sigreturn(regs, in_syscall) and
 * then restores callee-saved registers from the pt_regs that the C
 * code rebuilt from the sigcontext.  The final bv through %r2 goes to
 * either syscall_exit or syscall_exit_rfi (see comment below).
 */
	1932sys_rt_sigreturn_wrapper:
	1933	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	1934	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	1935	/* Don't save regs, we are going to restore them from sigcontext. */
	1936	STREG	%r2, -RP_OFFSET(%r30)
	1937#ifdef __LP64__
	1938	ldo	FRAME_SIZE(%r30), %r30
	1939	BL	sys_rt_sigreturn,%r2
	1940	ldo	-16(%r30),%r29		/* Reference param save area */
	1941#else
	1942	BL	sys_rt_sigreturn,%r2
	1943	ldo	FRAME_SIZE(%r30), %r30
	1944#endif
	1945
	1946	ldo	-FRAME_SIZE(%r30), %r30
	1947	LDREG	-RP_OFFSET(%r30), %r2
	1948
	1949	/* FIXME: I think we need to restore a few more things here. */
	1950	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	1951	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	1952	reg_restore %r1
	1953
	1954	/* If the signal was received while the process was blocked on a
	1955	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	1956	 * take us to syscall_exit_rfi and on to intr_return.
	1957	 */
	1958	bv	%r0(%r2)
	1959	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
	1960
1960
	1961	.export sys_sigaltstack_wrapper
/*
 * sigaltstack wrappers: fetch the user stack pointer from pt_regs
 * (arg2, %r24) and call do_sigaltstack / do_sigaltstack32.
 */
	1962sys_sigaltstack_wrapper:
	1963	/* Get the user stack pointer */
	1964	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	1965	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	1966	LDREG	TASK_PT_GR30(%r24),%r24
	1967	STREG	%r2, -RP_OFFSET(%r30)
	1968#ifdef __LP64__
	1969	ldo	FRAME_SIZE(%r30), %r30
	1970	b,l	do_sigaltstack,%r2
	1971	ldo	-16(%r30),%r29		/* Reference param save area */
	1972#else
	1973	bl	do_sigaltstack,%r2
	1974	ldo	FRAME_SIZE(%r30), %r30
	1975#endif
	1976
	1977	ldo	-FRAME_SIZE(%r30), %r30
	1978	LDREG	-RP_OFFSET(%r30), %r2
	1979	bv	%r0(%r2)
	1980	nop
	1981
	1982#ifdef __LP64__
	1983	.export sys32_sigaltstack_wrapper
	1984sys32_sigaltstack_wrapper:
	1985	/* Get the user stack pointer */
	1986	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	1987	LDREG	TASK_PT_GR30(%r24),%r24
	1988	STREG	%r2, -RP_OFFSET(%r30)
	1989	ldo	FRAME_SIZE(%r30), %r30
	1990	b,l	do_sigaltstack32,%r2
	1991	ldo	-16(%r30),%r29		/* Reference param save area */
	1992
	1993	ldo	-FRAME_SIZE(%r30), %r30
	1994	LDREG	-RP_OFFSET(%r30), %r2
	1995	bv	%r0(%r2)
	1996	nop
	1997#endif
	1998
1998
	1999	.export sys_rt_sigsuspend_wrapper
/*
 * rt_sigsuspend wrapper: callee-saved regs are saved to pt_regs
 * before the call (a signal will be delivered during the suspend)
 * and restored afterwards.
 */
	2000sys_rt_sigsuspend_wrapper:
	2001	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	2002	ldo	TASK_REGS(%r1),%r24
	2003	reg_save %r24
	2004
	2005	STREG	%r2, -RP_OFFSET(%r30)
	2006#ifdef __LP64__
	2007	ldo	FRAME_SIZE(%r30), %r30
	2008	b,l	sys_rt_sigsuspend,%r2
	2009	ldo	-16(%r30),%r29		/* Reference param save area */
	2010#else
	2011	bl	sys_rt_sigsuspend,%r2
	2012	ldo	FRAME_SIZE(%r30), %r30
	2013#endif
	2014
	2015	ldo	-FRAME_SIZE(%r30), %r30
	2016	LDREG	-RP_OFFSET(%r30), %r2
	2017
	2018	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	2019	ldo	TASK_REGS(%r1),%r1
	2020	reg_restore %r1
	2021
	2022	bv	%r0(%r2)
	2023	nop
	2024
2024
	2025	.export syscall_exit
	2026syscall_exit:
	2027
	2028	/* NOTE: HP-UX syscalls also come through here
	2029	 * after hpux_syscall_exit fixes up return
	2030	 * values. */
	2031
	2032	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	2033	 * via syscall_exit_rfi if the signal was received while the process
	2034	 * was running.
	2035	 */
	2036
	2037	/* save return value now */
	2038
	2039	mfctl	%cr30, %r1
	2040	LDREG	TI_TASK(%r1),%r1
	2041	STREG	%r28,TASK_PT_GR28(%r1)
	2042
	2043#ifdef CONFIG_HPUX
	2044
	2045/* <linux/personality.h> cannot be easily included */
	2046#define PER_HPUX 0x10
	2047	LDREG	TASK_PERSONALITY(%r1),%r19
	2048
	2049	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	2050	ldo	  -PER_HPUX(%r19), %r19
	2051	CMPIB<>,n 0,%r19,1f
	2052
	2053	/* Save other hpux returns if personality is PER_HPUX */
	2054	STREG     %r22,TASK_PT_GR22(%r1)
	2055	STREG     %r29,TASK_PT_GR29(%r1)
	20561:
	2057
	2058#endif /* CONFIG_HPUX */
	2059
	2060	/* Seems to me that dp could be wrong here, if the syscall involved
	2061	 * calling a module, and nothing got round to restoring dp on return.
	2062	 */
	2063	loadgp
	2064
	/* Exit-work checks: pending softirqs, need_resched, pending
	 * signals — each branches off to its handler and loops back. */
	2065syscall_check_bh:
	2066
	2067	/* Check for software interrupts */
	2068
	2069	.import irq_stat,data
	2070
	2071	load32	irq_stat,%r19
	2072
	2073#ifdef CONFIG_SMP
	2074	/* sched.h: int processor */
	2075	/* %r26 is used as scratch register to index into irq_stat[] */
	2076	ldw	TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
	2077
	2078	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
	2079#ifdef __LP64__
	2080	shld	%r26, 6, %r20
	2081#else
	2082	shlw	%r26, 5, %r20
	2083#endif
	2084	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
	2085#endif /* CONFIG_SMP */
	2086
	2087	LDREG	IRQSTAT_SIRQ_PEND(%r19),%r20    /* hardirq.h: unsigned long */
	2088	cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
	2089
	2090syscall_check_resched:
	2091
	2092	/* check for reschedule */
	2093
	2094	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	2095	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
	2096
	2097syscall_check_sig:
	2098	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* get ti flags */
	2099	bb,<,n	%r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
	2100
2100
/*
 * Fast syscall return to user space.  Ptraced tasks take the slower
 * syscall_restore_rfi path so PSW trace bits can be set.  Restores FP,
 * sar, the caller-visible GRs and space registers, then returns with
 * a branch-external through %sr3/%r31.
 */
	2101syscall_restore:
	2102	/* Are we being ptraced? */
	2103	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	2104
	2105	LDREG	TASK_PTRACE(%r1), %r19
	2106	bb,<	%r19,31,syscall_restore_rfi
	2107	nop
	2108
	2109	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	2110	rest_fp	%r19
	2111
	2112	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	2113	mtsar	%r19
	2114
	2115	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	2116	LDREG	TASK_PT_GR19(%r1),%r19
	2117	LDREG	TASK_PT_GR20(%r1),%r20
	2118	LDREG	TASK_PT_GR21(%r1),%r21
	2119	LDREG	TASK_PT_GR22(%r1),%r22
	2120	LDREG	TASK_PT_GR23(%r1),%r23
	2121	LDREG	TASK_PT_GR24(%r1),%r24
	2122	LDREG	TASK_PT_GR25(%r1),%r25
	2123	LDREG	TASK_PT_GR26(%r1),%r26
	2124	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	2125	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	2126	LDREG	TASK_PT_GR29(%r1),%r29
	2127	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
	2128
	2129	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	2130	rsm     PSW_SM_I, %r0
	2131	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	2132	mfsp	%sr3,%r1			   /* Get users space id */
	2133	mtsp    %r1,%sr7                           /* Restore sr7 */
	2134	ssm     PSW_SM_I, %r0
	2135
	2136	/* Set sr2 to zero for userspace syscalls to work. */
	2137	mtsp	%r0,%sr2
	2138	mtsp	%r1,%sr4			   /* Restore sr4 */
	2139	mtsp	%r1,%sr5			   /* Restore sr5 */
	2140	mtsp	%r1,%sr6			   /* Restore sr6 */
	2141
	2142	depi	3,31,2,%r31			   /* ensure return to user mode. */
	2143
	2144#ifdef __LP64__
	2145	/* decide whether to reset the wide mode bit
	2146	 *
	2147	 * For a syscall, the W bit is stored in the lowest bit
	2148	 * of sp.  Extract it and reset W if it is zero */
	2149	extrd,u,*<>	%r30,63,1,%r1
	2150	rsm	PSW_SM_W, %r0
	2151	/* now reset the lowest bit of sp if it was set */
	2152	xor	%r30,%r1,%r30
	2153#endif
	2154	be,n	0(%sr3,%r31)			   /* return to user space */
	2155
2155
	2156	/* We have to return via an RFI, so that PSW T and R bits can be set
	2157	 * appropriately.
	2158	 * This sets up pt_regs so we can return via intr_restore, which is not
	2159	 * the most efficient way of doing things, but it works.
	2160	 */
	2161syscall_restore_rfi:
	2162	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	2163	mtctl	%r2,%cr0			   /*   for immediate trap */
	2164	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	2165	ldi	0x0b,%r20			   /* Create new PSW */
	2166	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */
	2167
	2168	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	2169	 * set in include/linux/ptrace.h and converted to PA bitmap
	2170	 * numbers in asm-offsets.c */
	2171
	2172	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	2173	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	2174	depi	-1,27,1,%r20			   /* R bit */
	2175
	2176	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	2177	extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
	2178	depi	-1,7,1,%r20			   /* T bit */
	2179
	2180	STREG	%r20,TASK_PT_PSW(%r1)
	2181
	2182	/* Always store space registers, since sr3 can be changed (e.g. fork) */
	2183
	2184	mfsp    %sr3,%r25
	2185	STREG   %r25,TASK_PT_SR3(%r1)
	2186	STREG   %r25,TASK_PT_SR4(%r1)
	2187	STREG   %r25,TASK_PT_SR5(%r1)
	2188	STREG   %r25,TASK_PT_SR6(%r1)
	2189	STREG   %r25,TASK_PT_SR7(%r1)
	2190	STREG   %r25,TASK_PT_IASQ0(%r1)
	2191	STREG   %r25,TASK_PT_IASQ1(%r1)
	2192
	2193	/* XXX W bit??? */
	2194	/* Now if old D bit is clear, it means we didn't save all registers
	2195	 * on syscall entry, so do that now.  This only happens on TRACEME
	2196	 * calls, or if someone attached to us while we were on a syscall.
	2197	 * We could make this more efficient by not saving r3-r18, but
	2198	 * then we wouldn't be able to use the common intr_restore path.
	2199	 * It is only for traced processes anyway, so performance is not
	2200	 * an issue.
	2201	 */
	2202	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	2203	ldo	TASK_REGS(%r1),%r25
	2204	reg_save %r25				   /* Save r3 to r18 */
	2205
	2206	/* Save the current sr */
	2207	mfsp	%sr0,%r2
	2208	STREG	%r2,TASK_PT_SR0(%r1)
	2209
	2210	/* Save the scratch sr */
	2211	mfsp	%sr1,%r2
	2212	STREG	%r2,TASK_PT_SR1(%r1)
	2213
	2214	/* sr2 should be set to zero for userspace syscalls */
	2215	STREG	%r0,TASK_PT_SR2(%r1)
	2216
	/* Point IAOQ at the syscall return address (user mode, priv 3)
	 * and exit through the common interruption-restore path. */
	2217pt_regs_ok:
	2218	LDREG	TASK_PT_GR31(%r1),%r2
	2219	depi	3,31,2,%r2			   /* ensure return to user mode. */
	2220	STREG	%r2,TASK_PT_IAOQ0(%r1)
	2221	ldo	4(%r2),%r2
	2222	STREG	%r2,TASK_PT_IAOQ1(%r1)
	2223	copy	%r25,%r16
	2224	b	intr_restore
	2225	nop
	2226
2226
	/* Exit-work helpers for the syscall_check_* loop above.  Each calls
	 * into C and then re-enters the check chain at the right point. */
	2227	.import do_softirq,code
	2228syscall_do_softirq:
	2229	bl      do_softirq,%r2
	2230	nop
	2231	/* NOTE: We enable I-bit in case we schedule later,
	2232	 * and we might be going back to userspace if we were
	2233	 * traced. */
	2234	b       syscall_check_resched
	2235	ssm     PSW_SM_I, %r0	/* do_softirq returns with I bit off */
	2236
	2237	.import schedule,code
	2238syscall_do_resched:
	2239	BL	schedule,%r2
	2240#ifdef __LP64__
	2241	ldo	-16(%r30),%r29		/* Reference param save area */
	2242#else
	2243	nop
	2244#endif
	2245	b       syscall_check_bh  /* if resched, we start over again */
	2246	nop
	2247
	2248	.import do_signal,code
	2249syscall_do_signal:
	2250	/* Save callee-save registers (for sigcontext).
	2251	   FIXME: After this point the process structure should be
	2252	   consistent with all the relevant state of the process
	2253	   before the syscall.  We need to verify this.
	2254	*/
	2255	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	2256	ldo	TASK_REGS(%r1), %r25		/* struct pt_regs *regs */
	2257	reg_save %r25
	2258
	2259	ldi	1, %r24				/* unsigned long in_syscall */
	2260
	2261#ifdef __LP64__
	2262	ldo	-16(%r30),%r29			/* Reference param save area */
	2263#endif
	2264	BL	do_signal,%r2
	2265	copy	%r0, %r26			/* sigset_t *oldset = NULL */
	2266
	2267	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	2268	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	2269	reg_restore %r20
	2270
	2271	b,n     syscall_check_sig
2271
	2272	/*
	2273	 * get_register is used by the non access tlb miss handlers to
	2274	 * copy the value of the general register specified in r8 into
	2275	 * r1. This routine can't be used for shadowed registers, since
	2276	 * the rfir will restore the original value. So, for the shadowed
	2277	 * registers we put a -1 into r1 to indicate that the register
	2278	 * should not be used (the register being copied could also have
	2279	 * a -1 in it, but that is OK, it just means that we will have
	2280	 * to use the slow path instead).
	2281	 */
	2282
	/* Dispatch table: blr indexes into 2-instruction slots by %r8;
	 * each slot returns through %r25 with the value in %r1. */
	2283get_register:
	2284	blr     %r8,%r0
	2285	nop
	2286	bv      %r0(%r25)    /* r0 */
	2287	copy    %r0,%r1
	2288	bv      %r0(%r25)    /* r1 - shadowed */
	2289	ldi     -1,%r1
	2290	bv      %r0(%r25)    /* r2 */
	2291	copy    %r2,%r1
	2292	bv      %r0(%r25)    /* r3 */
	2293	copy    %r3,%r1
	2294	bv      %r0(%r25)    /* r4 */
	2295	copy    %r4,%r1
	2296	bv      %r0(%r25)    /* r5 */
	2297	copy    %r5,%r1
	2298	bv      %r0(%r25)    /* r6 */
	2299	copy    %r6,%r1
	2300	bv      %r0(%r25)    /* r7 */
	2301	copy    %r7,%r1
	2302	bv      %r0(%r25)    /* r8 - shadowed */
	2303	ldi     -1,%r1
	2304	bv      %r0(%r25)    /* r9 - shadowed */
	2305	ldi     -1,%r1
	2306	bv      %r0(%r25)    /* r10 */
	2307	copy    %r10,%r1
	2308	bv      %r0(%r25)    /* r11 */
	2309	copy    %r11,%r1
	2310	bv      %r0(%r25)    /* r12 */
	2311	copy    %r12,%r1
	2312	bv      %r0(%r25)    /* r13 */
	2313	copy    %r13,%r1
	2314	bv      %r0(%r25)    /* r14 */
	2315	copy    %r14,%r1
	2316	bv      %r0(%r25)    /* r15 */
	2317	copy    %r15,%r1
	2318	bv      %r0(%r25)    /* r16 - shadowed */
	2319	ldi     -1,%r1
	2320	bv      %r0(%r25)    /* r17 - shadowed */
	2321	ldi     -1,%r1
	2322	bv      %r0(%r25)    /* r18 */
	2323	copy    %r18,%r1
	2324	bv      %r0(%r25)    /* r19 */
	2325	copy    %r19,%r1
	2326	bv      %r0(%r25)    /* r20 */
	2327	copy    %r20,%r1
	2328	bv      %r0(%r25)    /* r21 */
	2329	copy    %r21,%r1
	2330	bv      %r0(%r25)    /* r22 */
	2331	copy    %r22,%r1
	2332	bv      %r0(%r25)    /* r23 */
	2333	copy    %r23,%r1
	2334	bv      %r0(%r25)    /* r24 - shadowed */
	2335	ldi     -1,%r1
	2336	bv      %r0(%r25)    /* r25 - shadowed */
	2337	ldi     -1,%r1
	2338	bv      %r0(%r25)    /* r26 */
	2339	copy    %r26,%r1
	2340	bv      %r0(%r25)    /* r27 */
	2341	copy    %r27,%r1
	2342	bv      %r0(%r25)    /* r28 */
	2343	copy    %r28,%r1
	2344	bv      %r0(%r25)    /* r29 */
	2345	copy    %r29,%r1
	2346	bv      %r0(%r25)    /* r30 */
	2347	copy    %r30,%r1
	2348	bv      %r0(%r25)    /* r31 */
	2349	copy    %r31,%r1
	2350
2350
	2351	/*
	2352	 * set_register is used by the non access tlb miss handlers to
	2353	 * copy the value of r1 into the general register specified in
	2354	 * r8.
	2355	 */
	2356
	/* Dispatch table mirroring get_register: blr indexes by %r8,
	 * each 2-instruction slot writes %r1 into the chosen register
	 * and returns through %r25. */
	2357set_register:
	2358	blr     %r8,%r0
	2359	nop
	2360	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	2361	copy    %r1,%r0
	2362	bv      %r0(%r25)    /* r1 */
	2363	copy    %r1,%r1
	2364	bv      %r0(%r25)    /* r2 */
	2365	copy    %r1,%r2
	2366	bv      %r0(%r25)    /* r3 */
	2367	copy    %r1,%r3
	2368	bv      %r0(%r25)    /* r4 */
	2369	copy    %r1,%r4
	2370	bv      %r0(%r25)    /* r5 */
	2371	copy    %r1,%r5
	2372	bv      %r0(%r25)    /* r6 */
	2373	copy    %r1,%r6
	2374	bv      %r0(%r25)    /* r7 */
	2375	copy    %r1,%r7
	2376	bv      %r0(%r25)    /* r8 */
	2377	copy    %r1,%r8
	2378	bv      %r0(%r25)    /* r9 */
	2379	copy    %r1,%r9
	2380	bv      %r0(%r25)    /* r10 */
	2381	copy    %r1,%r10
	2382	bv      %r0(%r25)    /* r11 */
	2383	copy    %r1,%r11
	2384	bv      %r0(%r25)    /* r12 */
	2385	copy    %r1,%r12
	2386	bv      %r0(%r25)    /* r13 */
	2387	copy    %r1,%r13
	2388	bv      %r0(%r25)    /* r14 */
	2389	copy    %r1,%r14
	2390	bv      %r0(%r25)    /* r15 */
	2391	copy    %r1,%r15
	2392	bv      %r0(%r25)    /* r16 */
	2393	copy    %r1,%r16
	2394	bv      %r0(%r25)    /* r17 */
	2395	copy    %r1,%r17
	2396	bv      %r0(%r25)    /* r18 */
	2397	copy    %r1,%r18
	2398	bv      %r0(%r25)    /* r19 */
	2399	copy    %r1,%r19
	2400	bv      %r0(%r25)    /* r20 */
	2401	copy    %r1,%r20
	2402	bv      %r0(%r25)    /* r21 */
	2403	copy    %r1,%r21
	2404	bv      %r0(%r25)    /* r22 */
	2405	copy    %r1,%r22
	2406	bv      %r0(%r25)    /* r23 */
	2407	copy    %r1,%r23
	2408	bv      %r0(%r25)    /* r24 */
	2409	copy    %r1,%r24
	2410	bv      %r0(%r25)    /* r25 */
	2411	copy    %r1,%r25
	2412	bv      %r0(%r25)    /* r26 */
	2413	copy    %r1,%r26
	2414	bv      %r0(%r25)    /* r27 */
	2415	copy    %r1,%r27
	2416	bv      %r0(%r25)    /* r28 */
	2417	copy    %r1,%r28
	2418	bv      %r0(%r25)    /* r29 */
	2419	copy    %r1,%r29
	2420	bv      %r0(%r25)    /* r30 */
	2421	copy    %r1,%r30
	2422	bv      %r0(%r25)    /* r31 */
	2423	copy    %r1,%r31