/*
 * arch/ppc64/kernel/head.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
25
/* Build secondary-CPU startup support into this file. */
#define SECONDARY_PROCESSORS

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iSeries/LparMap.h>

/* On iSeries, interrupts are soft-disabled (a paca flag) rather than
 * hard-disabled via MSR_EE; see DISABLE_INTS/ENABLE_INTS below. */
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * hcall interface to pSeries LPAR
 */
#define H_SET_ASR	0x30
/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */
60
/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */
82
	.text
	.globl	_stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* Kernel entry point.  The feature-section branch below is
	 * patched to a NOP when not wanted; see END_FTR_SECTION(0, 1). */
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap
#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the msChunks
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong msChunks-KERNELBASE
	.llong 0		/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#else /* CONFIG_PPC_ISERIES */

	/* Secondary processors spin on this value until it goes to 1. */
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpu's wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24			/* cpu # back in r3 for secondary init */
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE			/* spin loop released but SMP not built */
#endif
#endif
#endif
173
/* This value is used to mark exception frames on the stack.
 * The 64-bit constant is the ASCII string "regshere". */
	.section ".toc","aw"
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
	.text
179
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 *
 * Offsets of the per-exception register save slots within a
 * paca save area (PACA_EXGEN/PACA_EXMC/PACA_EXSLB).
 */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60

/*
 * Save r9-r13 and CR into 'area' in the paca, then rfid to the
 * virtual-mode handler 'label' with MSR_IR|MSR_DR|MSR_RI set.
 * On exit: r9 = saved CR, r11 = SRR0, r12 = SRR1, r13 = paca.
 * Assumes r13 was stashed in SPRG1 by the vector stub.
 */
#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRG3;	/* get paca address into r13 */		\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;	/* get high part of &label */		\
	mfmsr	r10;							\
	mfspr	r11,SRR0;	/* save SRR0 */				\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SRR0,r12;						\
	mfspr	r12,SRR1;	/* and SRR1 */				\
	mtspr	SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */
229
/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
/* Part 1: save r9-r13 and CR into 'area' in the paca (no rfid needed,
 * since the MMU is already on under the iSeries hypervisor). */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRG3;	/* get paca address into r13 */		\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

/* Part 2: fetch the hypervisor-saved SRR0/SRR1 from the lppaca into
 * r11/r12 and turn MSR_RI back on. */
#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1
250
/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * Builds a full pt_regs frame on the kernel stack (switching to
 * paca->kstack if we came from user mode) and stores trap number 'n'+1.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	bge-	cr1,bad_stack;		/* abort if it is */		\
	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	mflr	r9;			/* save LR in stackframe */	\
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);						\
	mfspr	r11,XER;		/* save XER in stackframe */	\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
301
/*
 * Exception vectors.
 */
/* Standard pSeries vector stub placed at absolute offset 'n':
 * stash r13 in SPRG1 and fall into the real-mode prolog. */
#define STD_EXCEPTION_PSERIES(n, label)					\
	. = n;								\
	.globl label##_pSeries;						\
label##_pSeries:							\
	HMT_MEDIUM;							\
	mtspr	SPRG1,r13;		/* save r13 */			\
	RUNLATCH_ON(r13);						\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

/* iSeries equivalent: MMU already on, so just save and branch. */
#define STD_EXCEPTION_ISERIES(n, label, area)				\
	.globl label##_iSeries;						\
label##_iSeries:							\
	HMT_MEDIUM;							\
	mtspr	SPRG1,r13;		/* save r13 */			\
	RUNLATCH_ON(r13);						\
	EXCEPTION_PROLOG_ISERIES_1(area);				\
	EXCEPTION_PROLOG_ISERIES_2;					\
	b	label##_common

/* iSeries maskable vector: divert to label##_iSeries_masked if
 * interrupts are soft-disabled (paca->proc_enabled == 0). */
#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
	.globl label##_iSeries;						\
label##_iSeries:							\
	HMT_MEDIUM;							\
	mtspr	SPRG1,r13;		/* save r13 */			\
	RUNLATCH_ON(r13);						\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
	lbz	r10,PACAPROCENABLED(r13);				\
	cmpwi	0,r10,0;						\
	beq-	label##_iSeries_masked;					\
	EXCEPTION_PROLOG_ISERIES_2;					\
	b	label##_common;						\

#ifdef DO_SOFT_DISABLE
/* Soft disable: record the old soft-enable state in the frame and
 * clear the paca flag, while leaving MSR_EE hard-enabled. */
#define DISABLE_INTS							\
	lbz	r10,PACAPROCENABLED(r13);				\
	li	r11,0;							\
	std	r10,SOFTE(r1);						\
	mfmsr	r10;							\
	stb	r11,PACAPROCENABLED(r13);				\
	ori	r10,r10,MSR_EE;						\
	mtmsrd	r10,1

#define ENABLE_INTS							\
	lbz	r10,PACAPROCENABLED(r13);				\
	mfmsr	r11;							\
	std	r10,SOFTE(r1);						\
	ori	r11,r11,MSR_EE;						\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

/* Restore EE to what it was at the time of the exception (_MSR). */
#define ENABLE_INTS							\
	ld	r12,_MSR(r1);						\
	mfmsr	r11;							\
	rlwimi	r11,r12,0,MSR_EE;					\
	mtmsrd	r11,1

#endif

/* Common (virtual-mode) handler body: build the frame, disable
 * interrupts, save non-volatile GPRs and call the C handler. */
#define STD_EXCEPTION_COMMON(trap, label, hdlr)				\
	.align	7;							\
	.globl label##_common;						\
label##_common:								\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);			\
	DISABLE_INTS;							\
	bl	.save_nvgprs;						\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	.ret_from_except

/* Like STD_EXCEPTION_COMMON but skips saving non-volatile GPRs and
 * takes the lighter-weight return path. */
#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)			\
	.align	7;							\
	.globl label##_common;						\
label##_common:								\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);			\
	DISABLE_INTS;							\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	.ret_from_except_lite
385
/*
 * Start of pSeries system interrupt routines.  These stubs live at the
 * architected fixed offsets (0x100, 0x200, ...) and run in real mode.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	/* On non-SLB (segment-table) CPUs, detect a fault on a bolted
	 * segment (DAR top nibble 0xc with the STAB-miss DSISR bit set
	 * gives 0x2c) and divert to the bolted-STE handler. */
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,DAR
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3		/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1		/* and SRR1 */
	mfspr	r3,SRR0			/* SRR0 is faulting address */
	b	.do_slb_miss		/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	/* Hand-rolled prolog: only r9-r13 juggling, no paca save area,
	 * since the syscall path keeps its arguments in r3-r8. */
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRG3
	mfspr	r11,SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1
	mtspr	SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
503
	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00; '.' keeps the location counter where it is */
	STD_EXCEPTION_PSERIES(., performance_monitor)

	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	/* Restore CR/r12 clobbered by the 0x300 STAB-miss test, then
	 * enter the prolog targeting the bolted-STE handler. */
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533
#ifdef CONFIG_PPC_ISERIES
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	/* Same bolted-segment test as the pSeries 0x300 vector. */
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start		/* Loop until told to go */
#endif
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	/* Soft-disabled decrementer: note the pending tick in the lppaca
	 * and rearm DEC so it fires again later. */
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	/* Restore all state saved by the prolog and return; the interrupt
	 * will be replayed when interrupts are soft-enabled again. */
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */
678
/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
709
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using the paca guard page as an emergency stack,
 * save the registers there, and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,DAR
	mfspr	r12,DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	/* Terminate the back-chain with a zeroed frame above ours. */
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b			/* kernel_bad_stack panics; loop if it returns */
749
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	/* Clear RI so a recursive exception here is unrecoverable —
	 * SRR0/SRR1 are about to be live until the rfid completes. */
	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
790
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)		/* It wont fit in the 0x300 handler */
	/* Stash DAR/DSISR before the common prolog clobbers r10. */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)		/* faulting address is the saved PC */
	andis.	r4,r12,0x5820		/* extract fault bits from SRR1 */
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* FP in kernel mode is a bug */
867 BUG_OPCODE
868
/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 *
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_math@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r6,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return (r12 holds the saved SRR1/_MSR) */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
924
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	/* Kernel mode, or CPU has no VMX: treat as an exception. */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except
939
#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altvec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return (r12 holds the saved SRR1/_MSR) */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1013
/*
 * Hash table stuff
 *
 * do_hash_page: common DSI/ISI path.  Tries to insert a hash PTE for
 * the faulting address; falls back to the full C page-fault path for
 * anything hash_page cannot resolve.
 */
	.align	7
_GLOBAL(do_hash_page)
	std	r3,_DAR(r1)		/* save faulting address */
	std	r4,_DSISR(r1)		/* save fault status bits */

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	.do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	.ret_from_except_lite
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f			/* r3 < 0: HV refused insertion */

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	bl	.local_irq_restore
	b	11f
#else
	beq	fast_exception_return   /* Return from exception on success */
	ble-	12f			/* Failure return from hash_page */

	/* fall through */
#endif

/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault		/* full C fault handler */
	cmpdi	r3,0			/* nonzero: unresolved bad fault */
	beq+	.ret_from_except_lite
	bl	.save_nvgprs
	mr	r5,r3			/* r5 = code from do_page_fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault		/* oops / kill the task */
	b	.ret_from_except

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except
1110
	/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0			/* r3 == 0 means success */
	beq+	fast_exception_return
	b	.handle_page_fault	/* otherwise treat as a page fault */
1117
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 *
 * Inserts a bolted segment-table entry for a kernel address, casting
 * out a random entry from the primary group if it is full.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,DAR
	srdi	r11,r11,28		/* r11 = ESID */
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f		/* found a free slot */
	addi	r10,r10,16	/* next ste (16 bytes each) */
	andi.	r11,r10,0x70	/* still inside the 8-entry group? */
	bne	1b

	/* Stick for only searching the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/

	isync			/* mark the entry invalid		*/
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
	slbie	r11		/* flush any cached translation for it	*/

2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
	eieio			/* order vsid store before valid store	*/

	mfspr	r11,DAR		/* Get the new esid			*/
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
	ori	r11,r11,0x90	/* Turn on valid and kp			*/
	std	r11,0(r10)	/* Put new entry back into the stab	*/

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb	/* interrupt is not recoverable */

	mtcrf	0x80,r9		/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2	/* clear MSR_RI (and EE) before rfid */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
1203
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 *
 * Calls the C routine slb_allocate() to create an SLB entry for the
 * faulting address, then restores state and returns with rfid.
 */
_GLOBAL(do_slb_miss)
	mflr	r10

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate		/* handle it */

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	unrecov_slb

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
	mtspr	SRR0,r11
	mtspr	SRR1,r12
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
1251
/* Unrecoverable SLB/STAB fault (MSR_RI was clear): report and spin. */
unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b			/* loop forever; no way to recover */
1259
/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap in LparData.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_PHYS_ADDR	/* 0x6000 */
	.globl initial_stab
initial_stab:
	.space	4096

/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
	.space	PAGE_SIZE
1282
/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)
	mr	r24,r3			/* keep physical cpu id in r24 */

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOADADDR(r13, paca) 		/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id                */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	sync

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
#ifdef CONFIG_SMP
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start
#endif
#endif
	b	3b			/* Loop until told to go	 */
1333
#ifdef CONFIG_PPC_ISERIES
/*
 * iSeries kernel entry: clear the BSS, set up an initial stack and
 * TOC, identify the CPU, then do iSeries early setup and fall into
 * the common start path.  Relocation is already on when we get here.
 */
_STATIC(__start_initialization_iSeries)
	/* Clear out the BSS */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3		/* shift right by 3		*/
	beq	4f			/* empty bss: skip the loop	*/
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:
	LOADADDR(r1,init_thread_union)
	addi	r1,r1,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)	/* terminate back-chain */

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000		/* TOC pointer lives 0x8000	*/
	addi	r2,r2,0x4000		/* past __toc_start		*/

	bl	.iSeries_early_setup

	/* relocation is on at this point */

	b	.start_here_common
#endif /* CONFIG_PPC_ISERIES */
1369
1370#ifdef CONFIG_PPC_MULTIPLATFORM
1371
/*
 * Turn the MMU off (clear MSR_IR and MSR_DR) and resume execution at
 * the real address in r4.  Returns immediately if translation is
 * already off.  Clobbers r0 and r3.
 */
_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR	/* already in real mode?	*/
	beqlr				/* yes: nothing to do		*/
	andc	r3,r3,r0		/* clear IR and DR		*/
	mtspr	SPRN_SRR0,r4		/* continue at caller's r4	*/
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.		/* prevent speculative execution */
1382
1383
/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	bne	.__boot_from_prom		/* yes -> prom */

	/* Save parameters */
	mr	r31,r3			/* r31 = device-tree block	*/
	mr	r30,r4			/* r30 = kernel physical base	*/

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Setup some critical 970 SPRs before switching MMU off */
	bl	.__970_cpu_preinit

	/* cpu # */
	li	r24,0

	/* Switch off MMU if not already */
	LOADADDR(r4, .__after_prom_start - KERNELBASE)
	add	r4,r4,r30		/* real addr of __after_prom_start */
	bl	.__mmu_off
	b	.__after_prom_start

_STATIC(__boot_from_prom)
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	sub	r2,r2,r3

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	bl	.prom_init
	/* We never return */
	trap
1453
/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	// XXX FIXME: Use phys returned by OF (r30)
	sub	r4,r27,r26 		/* source addr			 */
					/* current address of _start	 */
					/*   i.e. where we are running	 */
					/*	the source addr		 */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOADADDR(r5,klimit)
	sub	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27		/* copy the rest of the kernel */
	bl	.copy_and_flush
	b	.start_here_multiplatform
1504
1505#endif /* CONFIG_PPC_MULTIPLATFORM */
1506
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8			/* undo the entry adjustment	*/
	addi	r6,r6,8
	blr

/* Everything up to this label is copied by copy_and_flush above. */
.align 8
copy_to_here:
1544
/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1	/* rotate FP bit to MSB, mask it off */
	rldicl	r3,r0,(MSR_FP_LG+1),0	/* rotate back: MSR with FP clear */
	mtmsrd	r3			/* disable use of fpu now */
	isync
	blr
1556
/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0			/* result consumed by beq below */
	SAVE_32FPRS(0, r3)		/* save all 32 FP registers */
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r3)	/* ...and the FPSCR */
	beq	1f			/* no pt_regs: skip MSR update */
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	/* UP lazy-FP: no task owns the FPU any more */
	li	r5,0
	ld	r4,last_task_used_math@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr
1588
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589#ifdef CONFIG_ALTIVEC
/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1	/* rotate VEC bit to MSB, mask it off */
	rldicl	r3,r0,(MSR_VEC_LG+1),0	/* rotate back: MSR with VEC clear */
	mtmsrd	r3			/* disable use of VMX now */
	isync
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0			/* result consumed by beq below */
	SAVE_32VRS(0,r4,r3)		/* save all 32 vector registers */
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3		/* ...and the VSCR */
	beq	1f			/* no pt_regs: skip MSR update */
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable VMX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	/* UP lazy-VMX: no task owns the VMX unit any more */
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr
1634
1635#endif /* CONFIG_ALTIVEC */
1636
1637#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 * Each stub just loads its cpu number into r24 and joins the common
 * pmac_secondary_start path.
 */
	.section ".text";
	.align 2 ;

	.globl	pmac_secondary_start_1
pmac_secondary_start_1:	
	li	r24, 1
	b	.pmac_secondary_start

	.globl pmac_secondary_start_2
pmac_secondary_start_2:	
	li	r24, 2
	b	.pmac_secondary_start

	.globl pmac_secondary_start_3
pmac_secondary_start_3:
	li	r24, 3
	b	.pmac_secondary_start
	
/* Common PowerMac secondary bring-up; r24 = cpu number. */
_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_setup

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOADADDR(r4, paca) 		 /* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	.__secondary_start

#endif /* CONFIG_PPC_PMAC */
1687
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 * 1. Processor number
 * 2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)

	HMT_MEDIUM			/* Set thread priority to MEDIUM */

	ld	r2,PACATOC(r13)
	li	r6,0
	stb	r6,PACAPROCENABLED(r13)	/* soft-disabled until fully up */

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	LOADADDR(r6,_SDR1)
	ld	r6,0(r6)		/* get the value of _SDR1	 */
	mtspr	SDR1,r6			/* set the htab location	 */
#endif
	/* Initialize the first segment table (or SLB) entry		 */
	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
	bl	.stab_initialize

	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#]	 */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
	ori	r4,r3,1			/* turn on valid bit		 */

#ifdef CONFIG_PPC_ISERIES
	li	r0,-1			/* hypervisor call */
	li	r3,1
	sldi	r3,r3,63		/* 0x8000000000000000 */
	ori	r3,r3,4			/* 0x8000000000000004 */
	sc				/* HvCall_setASR */
#else
	/* set the ASR */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
	cmpldi 	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	/* On SStar/IStar/Pulsar LPAR the ASR must be set via hcall */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:					/* !(rpa hypervisor) || !(star)  */
	mtasr	r4			/* set the stab location	 */
99:
#endif
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/* 
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
1778#endif
1779
/*
 * Turn on 64-bit execution (MSR_SF) and 64-bit exceptions (MSR_ISF).
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)	/* r12 = MSR_SF */
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)	/* r12 = MSR_ISF */
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
1794
#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 * Runs in real mode: clears BSS, sets up a physical stack/TOC, builds
 * the initial hash table/stab/slb via early_setup(), programs ASR and
 * SDR1, then rfids to start_here_common with relocation on.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
 	bl	.hmt_start_secondary
91:
#endif

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	sub	r3,r3,r26		/* virt -> phys */

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	sub	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	sub	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca) 		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	sub	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */
	
	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
 	bl	.early_setup

	/* set the ASR */
	ld	r3,PACASTABREAL(r13)
	ori	r4,r3,1			/* turn on valid bit		 */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	cmpldi 	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	/* On SStar/IStar/Pulsar LPAR the ASR must be set via hcall */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HVSC				/* Invoking hcall */
	b	99f
98:				/* !(rpa hypervisor) || !(star) */
	mtasr	r4			/* set the stab location	*/
99:
	/* Set SDR1 (hash table pointer) */
	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	/* Test if bit 0 is set (LPAR bit) */
	andi.	r3,r3,0x1
	bne	98f			/* LPAR: hypervisor owns SDR1 */
	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
	sub	r6,r6,r26
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SDR1,r6			/* set the htab location  */
98: 
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */
1936
	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)	/* terminate back-chain */

	/* Apply the CPUs-specific fixups (nop out sections not relevant
	 * to this CPU
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca) 		/* Get base vaddr of paca array  */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl .start_kernel
1986
/* POWER3 needs no CPU-specific setup: empty stub. */
_GLOBAL(__setup_cpu_power3)
	blr
1989
/*
 * HMT (hardware multithreading) init: on Pulsar/Icestar/SStar, record
 * this thread's PIR in hmt_thread_data[] and kick off the secondary
 * thread, then fall into the common pSeries secondary spin loop.
 */
_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f			/* HMT not supported */
90:	mfspr	r6,PIR
	andi.	r6,r6,0x1f		/* Pulsar: 5-bit PIR */
	b	92f
91:	mfspr	r6,PIR
	andi.	r6,r6,0x3ff		/* Icestar/SStar: 10-bit PIR */
92:	sldi	r4,r24,3
	stwx	r6,r5,r4		/* hmt_thread_data[cpu#] = PIR */
	bl	.hmt_start_secondary
	b	101f

/* Secondary HMT thread spins here until its entry is found. */
__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4			/* force a real-mode address */
	li	r7,0
	mfspr	r6,PIR
	mfspr	r8,PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f

/* Search hmt_thread_data for our PIR to find our cpu number. */
103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9			/* r24 = our cpu number */
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init
2038
#ifdef CONFIG_HMT
/*
 * Wake the second hardware thread: point NIADORM at
 * __hmt_secondary_hold and program the dormancy/timer SPRs.
 * NOTE(review): the MSRDORM/TSC/TST constants below are taken as-is
 * from the original; their exact encodings are processor-specific.
 */
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4			/* real-mode address for the thread */
	mtspr	NIADORM, r4
	mfspr	r4, MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	TSC, r4
	li	r4,0x1f4
	mtspr	TST, r4
	mfspr	r4, HID0
	ori	r4, r4, 0x1
	mtspr	HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif
2061
#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
_GLOBAL(smp_release_cpus)
	/* All secondary cpus are spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 */
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	std	r3,0(r5)
	sync
	blr
#endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */
2076
2077
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.align	12			/* 2^12 = 4096-byte alignment */
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096			/* one page of zeroes */

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096			/* kernel top-level page directory */

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE