/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *  Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#define SECONDARY_PROCESSORS

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iSeries/LparMap.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * hcall interface to pSeries LPAR
 */
#define H_SET_ASR	0x30

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x3fff : Interrupt support
 * 0x4000 - 0x4fff : NACA
 * 0x6000          : iSeries and common interrupt prologs
 * 0x9000 - 0x9fff : Initial segment table
 */

/*
 *   SPRG Usage
 *
 *   Register	Definition
 *
 *   SPRG0	reserved for hypervisor
 *   SPRG1	temp - used to save gpr
 *   SPRG2	temp - used to save gpr
 *   SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

	.text
	.globl  _stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap
#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the msChunks
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong msChunks-KERNELBASE
	.llong 0		/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#else /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	   */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.	  */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif
#endif

/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
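	/*
	 * Explanatory note: the marker value 0x7265677368657265 is
	 * simply the ASCII string "regshere" ("regs here"); it makes
	 * exception frames easy to spot in raw stack dumps.
	 */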
	.text

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60

#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;		/* get high part of &label */	\
	mfmsr	r10;							\
	mfspr	r11,SRR0;		/* save SRR0 */			\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SRR0,r12;						\
	mfspr	r12,SRR1;		/* and SRR1 */			\
	mtspr	SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
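
/*
 * A note on the prolog above for readers new to this code: SRR0/SRR1
 * hold the interrupted PC and MSR.  The prolog saves them in r11/r12,
 * then loads SRR0 with the virtual address of the handler and SRR1
 * with an MSR that has IR/DR (relocation) and RI set, so the final
 * rfid acts as "branch to the handler with the MMU turned on".
 * The same idea in pseudocode (a sketch, not extra behaviour):
 *
 *	saved_pc = SRR0;  saved_msr = SRR1;	// kept in r11/r12
 *	SRR0 = virt_addr_of(label);
 *	SRR1 = mfmsr() | MSR_IR | MSR_DR | MSR_RI;
 *	rfid;					// jump to SRR0, MSR <- SRR1
 */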

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1

/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	bge-	cr1,bad_stack;		/* abort if it is */		\
	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	mflr	r9;			/* save LR in stackframe */	\
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);						\
	mfspr	r11,XER;		/* save XER in stackframe */	\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
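
/*
 * After EXCEPTION_PROLOG_COMMON the kernel stack holds, roughly (a
 * sketch; exact offsets come from asm/offsets.h of this era):
 *
 *	r1 ->	back-chain word pointing at the interrupted r1
 *		...STACK_FRAME_OVERHEAD...
 *		pt_regs: GPR0..GPR13 (the volatile regs; .save_nvgprs
 *		fills in the rest), _NIP, _MSR, _CCR, _LINK, _CTR,
 *		_XER, _TRAP, RESULT, ...
 *		exception_marker word at STACK_FRAME_OVERHEAD-16
 *
 * C handlers then receive r1+STACK_FRAME_OVERHEAD in r3 as their
 * struct pt_regs pointer.
 */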

/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES(n, label)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
	lbz	r10,PACAPROCENABLED(r13);		\
	cmpwi	0,r10,0;				\
	beq-	label##_iSeries_masked;			\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif

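/*
 * A note on DO_SOFT_DISABLE (iSeries): DISABLE_INTS above does not
 * clear MSR_EE -- hard interrupts stay enabled -- it clears the
 * paca->proc_enabled ("soft enable") byte instead.  An interrupt that
 * arrives while soft-disabled lands in the *_iSeries_masked code
 * further down, which records it (e.g. in the lppaca for the
 * decrementer) so that .local_irq_restore can replay it when
 * interrupts are soft-enabled again.
 */
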
#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
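
/*
 * About the 0x2c test above (explanatory note): after the srdi and
 * rlwimi, r13 = (DAR >> 60) with 0x20 OR'ed in iff the DSISR "no
 * segment translation" bit (0x00200000) was set.  A value of 0x2c
 * therefore means "segment-table miss on a 0xC... kernel-region
 * address", which takes the bolted-STAB fast path rather than the
 * normal prolog.
 */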

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,DAR
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,SRR0			/* SRR0 is faulting address */
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRG3
	mfspr	r11,SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1
	mtspr	SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* The AltiVec unavailable exception sits at 0xf20, i.e. in the
	 * middle of the prolog code of the performance monitor handler
	 * at 0xf00, so a little trickery is needed: branch out of line
	 * and place the real prolog at 0x3000 instead.
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(0x3000, performance_monitor)

	. = 0x3100
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)


	. = 0x6100

#ifdef CONFIG_PPC_ISERIES
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start		/* Loop until told to go */
#endif
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15	/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15	/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18	/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0		/* "yield timed" */
	li	r5,-1		/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1		/* r0=-1 indicates a Hypervisor call */
	sc			/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3	/* Put r13 back ???? */
	b	1b		/* If SMP not configured, secondaries
				 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
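
/*
 * Note for readers: decrementer_iSeries_masked records the pending
 * tick in the lppaca (LPPACADECRINT) and reloads DEC with the default
 * decrementer period, then falls into the code above it shares with
 * the hardware-interrupt case, which restores the saved registers and
 * returns to the interrupted context.  The recorded tick is delivered
 * once interrupts are soft-enabled again.
 */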
#endif

/*
 * Data area reserved for FWNMI option.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
#include "lparmap.s"
#endif /* CONFIG_PPC_ISERIES */

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	. = 0x8000
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	/*
	 * Space for the initial segment table
	 * For LPAR, the hypervisor must fill in at least one entry
	 * before we get control (with relocate on)
	 */
	. = STAB0_PHYS_ADDR
	.globl __start_stab
__start_stab:

	. = (STAB0_PHYS_ADDR + PAGE_SIZE)
	.globl __end_stab
__end_stab:


/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using the paca guard page as an emergency stack,
 * save the registers there, and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,DAR
	mfspr	r12,DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */
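
/*
 * Note: MSR_RI is cleared just before SRR0/SRR1 are loaded above
 * because from that point until the rfid, SRR0/SRR1 hold live state;
 * with RI clear, any exception taken in that window is recognised as
 * unrecoverable instead of silently corrupting the return state.
 */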

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)	/* It won't fit in the 0x300 handler */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */
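
/*
 * Explanatory note: the andis. above keeps the SRR1 bits (0x5820 in
 * the upper halfword) that encode why the ISI occurred; in the 64-bit
 * PowerPC architecture of this era these are already positioned to
 * line up with the DSISR-style reason bits that .do_hash_page expects
 * to find in r4.
 */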

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

/*
 * Hash table stuff
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if so, bail; otherwise try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel).  We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
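
/*
 * The same computation, sketched in C (illustrative only; the _PAGE_*
 * values are those of this kernel generation: _PAGE_PRESENT 0x001,
 * _PAGE_USER 0x002, _PAGE_EXEC 0x004, _PAGE_RW 0x200):
 *
 *	access = (dsisr & 0x02000000) ? _PAGE_RW : 0;	// store fault?
 *	if ((msr & MSR_PR) || !(dar >> 63))	// user mode, or user segment
 *		access |= _PAGE_USER;
 *	access |= _PAGE_PRESENT;
 *	if (trap == 0x400)			// instruction fault
 *		access |= _PAGE_EXEC;
 */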

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f
#else
	beq	fast_exception_return	/* Return from exception on success */
	ble-	12f			/* Failure return from hash_page */

	/* fall through */
#endif

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	beq+	fast_exception_return
	b	.handle_page_fault

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b
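
	/*
	 * Layout reminder (explanatory note): the segment table is made
	 * of 128-byte STE groups of eight 16-byte entries, so address
	 * bits 0x70 select an entry within its group; the andi./bne
	 * above end the scan when those bits wrap to zero, i.e. once
	 * the whole primary group has been examined.
	 */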

	/* Stick to searching only the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,DAR		/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_slb_miss)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate		/* handle it */

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
	mtspr	SRR0,r11
	mtspr	SRR1,r12
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* Set up a paca value for this processor.  Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOADADDR(r13, paca)		/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id		 */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	 */
	cmpw	r6,r24			/* Compare to our id		 */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss	 */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be the logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	sync

	/* Create a temp kernel stack for use before relocation is on.	 */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
#ifdef CONFIG_SMP
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start
#endif
#endif
	b	3b			/* Loop until told to go	 */

#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			 */
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		 */
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	 */
3:	stdu	r0,8(r8)
	bdnz	3b
4:
	LOADADDR(r1,init_thread_union)
	addi	r1,r1,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	bl	.iSeries_early_setup

	/* relocation is on at this point */

	b	.start_here_common
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_MULTIPLATFORM

_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */


/*
 * Here is our main kernel entry point.  We currently support two
 * kinds of entry, depending on the value of r5.
 *
 * r5 != NULL -> OF entry: we go to prom_init; "legacy" parameters
 *               are in r3...r7
 *
 * r5 == NULL -> kexec-style entry: r3 is a physical pointer to the
 *               DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	bne	.__boot_from_prom	/* yes -> prom */

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Make sure we are running in 64-bit mode */
	bl	.enable_64b_mode

	/* Setup some critical 970 SPRs before switching MMU off */
	bl	.__970_cpu_preinit

	/* cpu # */
	li	r24,0

	/* Switch off MMU if not already off */
	LOADADDR(r4, .__after_prom_start - KERNELBASE)
	add	r4,r4,r30
	bl	.__mmu_off
	b	.__after_prom_start

_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* Make sure we are running in 64-bit mode */
	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	sub	r2,r2,r3

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	bl	.prom_init
	/* We never return */
	trap

/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	sub	r4,r27,r26		/* source addr			 */
					/* current address of _start	 */
					/* i.e. where we are running	 */
					/* the source addr		 */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOADADDR(r5,klimit)
	sub	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_multiplatform

#endif /* CONFIG_PPC_MULTIPLATFORM */

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		 */
					/* denominator cache line	 */
					/* size.  This results in	 */
					/* extra cache line flushes	 */
					/* but operation is correct.	 */
					/* Can't get cache line size	 */
					/* from NACA as it is being	 */
					/* moved too.			 */

	mtctr	r0			/* put # words/line in ctr	 */
3:	addi	r6,r6,8			/* copy a cache line		 */
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		 */
	sync
	icbi	r6,r3			/* flush the icache line	 */
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr
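
/*
 * Note for readers: the dcbst/sync/icbi in the loop above implement
 * the usual recipe for copying code -- push each modified line from
 * the data cache out to memory, order the stores, and invalidate the
 * stale instruction-cache line -- so the copied kernel image is safe
 * to execute once control is transferred to it.
 */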

.align 8
copy_to_here:

/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_math@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r6,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1
	rldicl	r3,r0,(MSR_FP_LG+1),0
	mtmsrd	r3			/* disable use of fpu now */
	isync
	blr
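
/*
 * Explanatory note: the pair of rldicl instructions above clears
 * MSR_FP without a scratch mask register: the first rotates the MSR
 * so the FP bit becomes the top bit, where the mask truncates it;
 * the second rotates the result back into place (the two shift
 * amounts sum to 64).
 */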

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r3)
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_math@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr


#ifdef CONFIG_ALTIVEC

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code.  Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3			/* disable use of VMX now */
	isync
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable VMX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text"
	.align 2

	.globl	pmac_secondary_start_1
pmac_secondary_start_1:
	li	r24, 1
	b	.pmac_secondary_start

	.globl pmac_secondary_start_2
pmac_secondary_start_2:
	li	r24, 2
	b	.pmac_secondary_start

	.globl pmac_secondary_start_3
pmac_secondary_start_3:
	li	r24, 3
	b	.pmac_secondary_start

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries does this early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	   = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)

	HMT_MEDIUM			/* Set thread priority to MEDIUM */

	ld	r2,PACATOC(r13)
	li	r6,0
	stb	r6,PACAPROCENABLED(r13)

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	LOADADDR(r6,_SDR1)
	ld	r6,0(r6)		/* get the value of _SDR1	 */
	mtspr	SDR1,r6			/* set the htab location	 */
#endif
	/* Initialize the first segment table (or SLB) entry		 */
	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
	bl	.stab_initialize

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
	ori	r4,r3,1			/* turn on valid bit		 */

#ifdef CONFIG_PPC_ISERIES
	li	r0,-1			/* hypervisor call */
	li	r3,1
	sldi	r3,r3,63		/* 0x8000000000000000 */
	ori	r3,r3,4			/* 0x8000000000000004 */
	sc				/* HvCall_setASR */
#else
	/* set the ASR */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:	/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
#endif
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
#endif

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
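
/*
 * Explanatory note: this sets MSR_SF (64-bit mode) and MSR_ISF
 * (64-bit mode on interrupts).  The masks are built with li/rldicr
 * because these bits live in the upper word of the MSR, out of reach
 * of ori/oris.
 */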

#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS.  It may already have been done in prom_init,
	 * but that's irrelevant since prom_init will soon be detached from
	 * the kernel completely.  Besides, we need to clear it now for
	 * kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			 */
	addi	r11,r11,7		/* round up to an even double word */
	rldicl.	r11,r11,61,3		/* shift right by 3		 */
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	 */
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	sub	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	sub	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	sub	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	sub	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	/* set the ASR */
	ld	r3,PACASTABREAL(r13)
	ori	r4,r3,1			/* turn on valid bit		 */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:	/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location */
99:
	/* Set SDR1 (hash table pointer) */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	/* Test if bit 0 is set (LPAR bit) */
	andi.	r3,r3,0x1
	bne	98f
	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
	sub	r6,r6,r26
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SDR1,r6			/* set the htab location */
98:
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPU-specific fixups (nop out sections not relevant
	 * to this CPU)
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel

_GLOBAL(__setup_cpu_power3)
	blr

_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,PIR
	mfspr	r8,PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f

103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	NIADORM, r4
	mfspr	r4, MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	TSC, r4
	li	r4,0x1f4
	mtspr	TST, r4
	mfspr	r4, HID0
	ori	r4, r4, 0x1
	mtspr	HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif

#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
_GLOBAL(smp_release_cpus)
	/* All secondary cpus are spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non-SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 */
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	std	r3,0(r5)
	sync
	blr
#endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE