/*
 * arch/ppc64/kernel/head.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define SECONDARY_PROCESSORS

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iSeries/LparMap.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * hcall interface to pSeries LPAR
 */
#define H_SET_ASR	0x30

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x6fff : interrupt support, iSeries and common interrupt prologs
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x9000 - 0x9fff : Initial segment table
 */

/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl	_stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap
#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the msChunks
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong msChunks-KERNELBASE
	.llong 0		/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#else /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.	  */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
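	/*
	 * The symbol name and value above encode ASCII: 0x72656773 is
	 * "regs" and 0x68657265 is "here", so the marker doubleword
	 * reads "regshere" when the stack is dumped, making frame
	 * boundaries easy to spot.
	 */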
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60

#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SRR0;		/* save SRR0 */			\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SRR0,r12;						\
	mfspr	r12,SRR1;		/* and SRR1 */			\
	mtspr	SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
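
/*
 * A note on the tail of the prolog above: SRR0 is loaded with the
 * handler's *virtual* address and SRR1 with MSR | IR | DR | RI, so
 * the rfid both turns relocation on and branches to the handler in
 * one instruction.  The high 32 bits of the handler's virtual
 * address are recovered from r13, since the paca and the handler
 * both live in the same upper kernel region.
 */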

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
	mr	r10,r1;			/* Save r1			*/ \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
	bge-	cr1,bad_stack;		/* abort if it is		*/ \
	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
	std	r10,0(r1);		/* make stack chain pointer	*/ \
	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
	mflr	r9;			/* save LR in stackframe	*/ \
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe	*/ \
	std	r10,_CTR(r1);						\
	mfspr	r11,XER;		/* save XER in stackframe	*/ \
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number		*/ \
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result		*/ \
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
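
/*
 * A note on (n)+1 above: the low bit of the saved trap number flags
 * a frame that holds only the volatile GPRs.  .save_nvgprs clears
 * the bit once the non-volatile registers have been stored as well;
 * this is the convention the C code's FULL_REGS() test relies on.
 */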

/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES(n, label)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
	lbz	r10,PACAPROCENABLED(r13);		\
	cmpwi	0,r10,0;				\
	beq-	label##_iSeries_masked;			\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif
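
/*
 * Under DO_SOFT_DISABLE (iSeries), DISABLE_INTS above implements soft
 * disabling: the previous soft-enable state goes into the frame
 * (SOFTE) and paca->proc_enabled is cleared, while MSR_EE is
 * deliberately left *hard-enabled*.  An interrupt arriving while
 * soft-disabled is simply noted (see the *_iSeries_masked handlers
 * below) and handled later when .local_irq_restore re-enables.
 */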

#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite
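
/*
 * The _LITE variant skips save_nvgprs and returns through
 * .ret_from_except_lite, so the non-volatile GPRs are never written
 * to the frame.  It is reserved for frequent interrupts (such as the
 * decrementer below) whose handlers do not touch the non-volatiles.
 */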

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,DAR
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,SRR0			/* SRR0 is faulting address */
	b	.do_slb_miss		/* Rel. branch works in real mode */
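
/*
 * Both SLB miss vectors above stay in real mode: .do_slb_miss is
 * reached with a plain relative branch, which resolves the same way
 * whether or not translation is on, so the cost of an rfid
 * transition is saved on this hot path.
 */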

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRG3
	mfspr	r11,SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1
	mtspr	SRR1,r10
	rfid
	b	.	/* prevent speculative execution */
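
/*
 * The system call entry is an open-coded, lighter variant of
 * EXCEPTION_PROLOG_PSERIES: nothing is written to a paca save area.
 * The user's r13 is parked in r9 (a volatile register the syscall
 * ABI lets us consume), and the same rfid trick transfers to
 * system_call_common with relocation on.
 */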

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)

	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
515
David Gibsonec465512005-08-19 14:52:31 +1000516/*
517 * Vectors for the FWNMI option. Share common code.
518 */
519 .globl system_reset_fwnmi
520system_reset_fwnmi:
521 HMT_MEDIUM
522 mtspr SPRG1,r13 /* save r13 */
523 RUNLATCH_ON(r13)
524 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525
David Gibsonec465512005-08-19 14:52:31 +1000526 .globl machine_check_fwnmi
527machine_check_fwnmi:
528 HMT_MEDIUM
529 mtspr SPRG1,r13 /* save r13 */
530 RUNLATCH_ON(r13)
531 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532
#ifdef CONFIG_PPC_ISERIES
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl	system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start		/* We have been told to go */
#endif
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using the paca guard page as an emergency stack,
 * save the registers there, and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,DAR
	mfspr	r12,DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

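	/*
	 * Clear MSR_RI before loading SRR0/SRR1: from this point until
	 * the rfid, another exception would overwrite them, so the
	 * window is flagged as unrecoverable.
	 */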
	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)	/* It won't fit in the 0x300 handler */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_math@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r6,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code.  Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel).  We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f
#else
	beq	fast_exception_return	/* Return from exception on success */
	ble-	12f			/* Failure return from hash_page */

	/* fall through */
#endif

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	beq+	fast_exception_return
	b	.handle_page_fault

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick for only searching the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,DAR		/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
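
/*
 * A note on the STE layout assumed above: each segment table entry
 * is 16 bytes -- doubleword 0 holds the ESID plus a flag byte (0x80
 * = valid, 0x10 = Kp), doubleword 1 holds the VSID << 12.  A group
 * is eight entries (128 bytes), which is why the free-entry scan
 * masks the offset with 0x70 and the castout index is
 * (TB << 4) & 0x70.
 */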

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_slb_miss)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate		/* handle it */

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
	mtspr	SRR0,r11
	mtspr	SRR1,r12
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
	.space	PAGE_SIZE

	/*
	 * Space for the initial segment table
	 * For LPAR, the hypervisor must fill in at least one entry
	 * before we get control (with relocate on)
	 */
	. = STAB0_PHYS_ADDR
	.globl __start_stab
__start_stab:

	. = (STAB0_PHYS_ADDR + PAGE_SIZE)
	.globl __end_stab
__end_stab:

/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOADADDR(r13, paca)		/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id		 */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	 */
	cmpw	r6,r24			/* Compare to our id		 */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss	 */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be the logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	sync

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
#ifdef CONFIG_SMP
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start
#endif
#endif
	b	3b			/* Loop until told to go	 */

#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:
	LOADADDR(r1,init_thread_union)
	addi	r1,r1,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
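	/*
	 * The TOC pointer is biased 0x8000 bytes into the TOC so the
	 * whole 64KB is reachable with signed 16-bit offsets; the bias
	 * is added as two 0x4000 increments because addi takes only a
	 * signed 16-bit immediate.
	 */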

	bl	.iSeries_early_setup

	/* relocation is on at this point */

	b	.start_here_common
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_MULTIPLATFORM

_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */


/*
 * Here is our main kernel entry point. We currently support 2 kinds of
 * entries, depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	bne	.__boot_from_prom	/* yes -> prom */

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Setup some critical 970 SPRs before switching MMU off */
	bl	.__970_cpu_preinit

	/* cpu # */
	li	r24,0

	/* Switch off MMU if not already off */
	LOADADDR(r4, .__after_prom_start - KERNELBASE)
	add	r4,r4,r30
	bl	.__mmu_off
	b	.__after_prom_start

_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	sub	r2,r2,r3

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	bl	.prom_init
	/* We never return */
	trap

/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	sub	r4,r27,r26		/* source addr			 */
					/* current address of _start	 */
					/*   i.e. where we are running	 */
					/*	the source addr		 */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOADADDR(r5,klimit)
	sub	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_multiplatform

#endif /* CONFIG_PPC_MULTIPLATFORM */

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr
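
/*
 * Note: each outer iteration above copies 16 doublewords (128 bytes,
 * the assumed worst-case cache line) and then does dcbst/icbi on the
 * destination line.  Since the bytes being copied are code that will
 * be executed shortly, every line must be flushed to memory and
 * invalidated in the icache before we branch into the copy.
 */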

.align 8
copy_to_here:

/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1
	rldicl	r3,r0,(MSR_FP_LG+1),0
	mtmsrd	r3			/* disable use of fpu now */
	isync
	blr
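
/*
 * The rldicl pair above clears MSR_FP without materializing a mask:
 * the first rotate-and-clear brings the FP bit to bit 63 and drops
 * it, the second rotates everything back into place.
 */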

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r3)
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_math@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#ifdef CONFIG_ALTIVEC

/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3			/* disable use of VMX now */
	isync
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable VMX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	pmac_secondary_start_1
pmac_secondary_start_1:
	li	r24, 1
	b	.pmac_secondary_start

	.globl pmac_secondary_start_2
pmac_secondary_start_2:
	li	r24, 2
	b	.pmac_secondary_start

	.globl pmac_secondary_start_3
pmac_secondary_start_3:
	li	r24, 3
	b	.pmac_secondary_start

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries does that early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24 = cpu# (in Linux terms)
 *   r13 = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)

	HMT_MEDIUM			/* Set thread priority to MEDIUM */

	ld	r2,PACATOC(r13)
	li	r6,0
	stb	r6,PACAPROCENABLED(r13)

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	LOADADDR(r6,_SDR1)
	ld	r6,0(r6)		/* get the value of _SDR1	 */
	mtspr	SDR1,r6			/* set the htab location	 */
#endif
	/* Initialize the first segment table (or SLB) entry		 */
	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
	bl	.stab_initialize

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
	ori	r4,r3,1			/* turn on valid bit		 */

#ifdef CONFIG_PPC_ISERIES
	li	r0,-1			/* hypervisor call */
	li	r3,1
	sldi	r3,r3,63		/* 0x8000000000000000 */
	ori	r3,r3,4			/* 0x8000000000000004 */
	sc				/* HvCall_setASR */
#else
	/* set the ASR */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:	/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
#endif
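	/*
	 * The PVR test above: on SStar/IStar/Pulsar machines running
	 * under the RPA hypervisor, the segment table pointer can only
	 * be set through the H_SET_ASR hcall; everywhere else a direct
	 * mtasr suffices.
	 */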
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
#endif

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
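
/*
 * MSR_SF selects 64-bit mode; MSR_ISF requests that exceptions also
 * be taken in 64-bit mode, on CPUs that implement it.  Both bits
 * live in the upper word of the MSR, out of reach of a 16-bit ori
 * immediate, hence the li/rldicr sequences to build each mask.
 */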

#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	sub	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	sub	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	sub	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	sub	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	/* set the ASR */
	ld	r3,PACASTABREAL(r13)
	ori	r4,r3,1			/* turn on valid bit		 */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:	/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
	/* Set SDR1 (hash table pointer) */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	/* Test if bit 0 is set (LPAR bit) */
	andi.	r3,r3,0x1
	bne	98f
	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
	sub	r6,r6,r26
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SDR1,r6			/* set the htab location */
98:
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel

_GLOBAL(__setup_cpu_power3)
	blr

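/*
 * HMT: hardware multithreading on the RS64-era processors matched by
 * the PVR checks below (0x34 Pulsar, 0x36 IceStar, 0x37 SStar -- the
 * names come from the comments elsewhere in this file).  hmt_init
 * records this hardware thread's id in hmt_thread_data, and
 * hmt_start_secondary pokes what appear to be HMT-specific SPRs
 * (NIADORM, MSRDORM, TSC, TST) to wake the second thread at
 * __hmt_secondary_hold.
 */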
_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,PIR
	mfspr	r8,PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f

103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	NIADORM, r4
	mfspr	r4, MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	TSC, r4
	li	r4,0x1f4
	mtspr	TST, r4
	mfspr	r4, HID0
	ori	r4, r4, 0x1
	mtspr	HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif

#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
_GLOBAL(smp_release_cpus)
	/* All secondary cpus are spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 */
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	std	r3,0(r5)
	sync
	blr
#endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE
2099 .space COMMAND_LINE_SIZE