blob: c7462fae766237286bbd8a6e50fd0665fb56d3b7 [file] [log] [blame]
/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
25
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/processor.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <asm/systemcfg.h>
32#include <asm/ppc_asm.h>
33#include <asm/offsets.h>
34#include <asm/bug.h>
35#include <asm/cputable.h>
36#include <asm/setup.h>
37#include <asm/hvcall.h>
Stephen Rothwell2ad56492005-08-17 13:01:50 +100038#include <asm/iSeries/LparMap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
/*
 * On iSeries, interrupt disabling is done "softly" via a per-CPU flag
 * (PACAPROCENABLED) rather than by clearing MSR[EE]; see DISABLE_INTS /
 * ENABLE_INTS below, which key off this define.
 */
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
43
/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */
53
/*
 * SPRG Usage
 *
 * Register	Definition
 *
 * SPRG0	reserved for hypervisor
 * SPRG1	temp - used to save gpr
 * SPRG2	temp - used to save gpr
 * SPRG3	virt addr of paca
 */
64
/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */
75
	.text
	.globl _stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap
David Gibson60ba4492005-08-19 14:52:32 +100089
#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the msChunks
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong msChunks-KERNELBASE
	.llong 0		/* pidhash-KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#endif /* CONFIG_PPC_ISERIES */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117
	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 *
 * On entry: r3 = this cpu's linux logical cpu number.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
/* This value is used to mark exception frames on the stack. */
/* (0x7265677368657265 is the ASCII string "regshere".)      */
	.section ".toc","aw"
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
	.text
172
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
/* Byte offsets into the per-exception save areas in the paca
 * (PACA_EXGEN / PACA_EXMC / PACA_EXSLB). */
#define EX_R9		0
#define EX_R10		8
#define EX_R11		16
#define EX_R12		24
#define EX_R13		32
#define EX_SRR0		40
#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
#define EX_DAR		48
#define EX_LR		48	/* SLB miss saves LR, but not DAR */
#define EX_DSISR	56
#define EX_CCR		60

/*
 * Save r9-r13 and CR in the given paca save area, then rfid to the
 * virtual-mode handler `label` with relocation (IR/DR) turned on.
 * Assumes r13 was stashed in SPRG1 by the vector stub.
 */
#define EXCEPTION_PROLOG_PSERIES(area, label)				\
	mfspr	r13,SPRG3;	/* get paca address into r13 */		\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9;							\
	clrrdi	r12,r13,32;	/* get high part of &label */		\
	mfmsr	r10;							\
	mfspr	r11,SRR0;	/* save SRR0 */				\
	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
	mtspr	SRR0,r12;						\
	mfspr	r12,SRR1;	/* and SRR1 */				\
	mtspr	SRR1,r10;						\
	rfid;								\
	b	.	/* prevent speculative execution */

/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area)				\
	mfspr	r13,SPRG3;	/* get paca address into r13 */		\
	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
	std	r10,area+EX_R10(r13);					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	mfspr	r9,SPRG1;						\
	std	r9,area+EX_R13(r13);					\
	mfcr	r9

/* Pick up SRR0/SRR1 from the hypervisor-maintained lppaca and set RI. */
#define EXCEPTION_PROLOG_ISERIES_2					\
	mfmsr	r10;							\
	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
	ori	r10,r10,MSR_RI;						\
	mtmsrd	r10,1
243
/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * Builds a full pt_regs frame on the kernel stack (switching stacks
 * if we came from user mode) and loads the kernel TOC into r2.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace */ \
	bge-	cr1,bad_stack;		/* abort if it is */		\
	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */	\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	mflr	r9;			/* save LR in stackframe */	\
	std	r9,_LINK(r1);						\
	mfctr	r10;			/* save CTR in stackframe */	\
	std	r10,_CTR(r1);						\
	mfspr	r11,XER;		/* save XER in stackframe */	\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
294
/*
 * Exception vectors.
 */
/* A pSeries vector stub at fixed offset n: stash r13, then switch to
 * the virtual-mode handler label##_common via the PSERIES prolog. */
#define STD_EXCEPTION_PSERIES(n, label)			\
	. = n;						\
	.globl label##_pSeries;				\
label##_pSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(area);		\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common

/* As STD_EXCEPTION_ISERIES, but honours the soft-disable flag:
 * if interrupts are soft-disabled, divert to label##_iSeries_masked. */
#define MASKABLE_EXCEPTION_ISERIES(n, label)		\
	.globl label##_iSeries;				\
label##_iSeries:					\
	HMT_MEDIUM;					\
	mtspr	SPRG1,r13;	/* save r13 */		\
	RUNLATCH_ON(r13);				\
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
	lbz	r10,PACAPROCENABLED(r13);		\
	cmpwi	0,r10,0;				\
	beq-	label##_iSeries_masked;			\
	EXCEPTION_PROLOG_ISERIES_2;			\
	b	label##_common;				\

#ifdef DO_SOFT_DISABLE
/* Soft-disable: record the old soft-enable state in the frame, clear
 * the per-CPU flag, and hard-enable EE (masked interrupts are caught
 * and deferred by the *_masked handlers). */
#define DISABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	li	r11,0;				\
	std	r10,SOFTE(r1);			\
	mfmsr	r10;				\
	stb	r11,PACAPROCENABLED(r13);	\
	ori	r10,r10,MSR_EE;			\
	mtmsrd	r10,1

#define ENABLE_INTS				\
	lbz	r10,PACAPROCENABLED(r13);	\
	mfmsr	r11;				\
	std	r10,SOFTE(r1);			\
	ori	r11,r11,MSR_EE;			\
	mtmsrd	r11,1

#else	/* hard enable/disable interrupts */
#define DISABLE_INTS

/* Restore EE to whatever it was when the exception was taken. */
#define ENABLE_INTS				\
	ld	r12,_MSR(r1);			\
	mfmsr	r11;				\
	rlwimi	r11,r12,0,MSR_EE;		\
	mtmsrd	r11,1

#endif

/* Common (virtual-mode) handler body: build the frame, save the
 * non-volatile GPRs, and call the C handler hdlr. */
#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	bl	.save_nvgprs;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except

/* "Lite" variant: skips save_nvgprs and uses the lite return path,
 * for handlers that never need the non-volatile GPRs (e.g. timer). */
#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
	.align	7;					\
	.globl label##_common;				\
label##_common:						\
	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
	DISABLE_INTS;					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	bl	hdlr;					\
	b	.ret_from_except_lite
378
/*
 * Start of pSeries system interrupt routines.
 * These live at the architected fixed offsets (0x100, 0x200, ...)
 * and run in real mode with relocation off.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	/* On segment-table (non-SLB) CPUs, detect a bolted STAB miss:
	 * DSISR bit 0x20 set and DAR in the 0xC... kernel region
	 * (r13 ends up 0x2c exactly for that combination). */
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_pSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3	/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1	/* and SRR1 */
	mfspr	r3,DAR
	b	.do_slb_miss	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	mtspr	SPRG1,r13
	RUNLATCH_ON(r13)
	mfspr	r13,SPRG3	/* get paca address into r13 */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r9,SPRG1
	std	r9,PACA_EXSLB+EX_R13(r13)
	mfcr	r9
	mfspr	r12,SRR1	/* and SRR1 */
	mfspr	r3,SRR0		/* SRR0 is faulting address */
	b	.do_slb_miss	/* Rel. branch works in real mode */

	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	STD_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	/* Hand-rolled prolog: r13 is saved in r9 (not SPRG1) and we
	 * branch to system_call_common with relocation turned on. */
	HMT_MEDIUM
	RUNLATCH_ON(r9)
	mr	r9,r13
	mfmsr	r10
	mfspr	r13,SPRG3
	mfspr	r11,SRR0
	clrrdi	r12,r13,32
	oris	r12,r12,system_call_common@h
	ori	r12,r12,system_call_common@l
	mtspr	SRR0,r12
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1
	mtspr	SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	. = 0xf00
	b	performance_monitor_pSeries

	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
496
	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 (the 0xf00 vector just branches here) */
	STD_EXCEPTION_PSERIES(., performance_monitor)

	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	/* Undo the CR/r12 scratching done in the 0x300 vector before
	 * entering the bolted-STAB handler. */
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * Vectors for the FWNMI option.  Share common code.
 * Firmware delivers system reset / machine check NMIs here.
 */
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13	/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526
#ifdef CONFIG_PPC_ISERIES
/*** ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	/* Same bolted-STAB detection as the pSeries 0x300 vector. */
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13	/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13	/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* Loop until told to go */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r0 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	/* A decrementer taken while soft-disabled: remember it in the
	 * lppaca, rearm DEC, and return via the masked-restore path. */
	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9		/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */
669
/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
700
701/*
702 * Here we have detected that the kernel stack pointer is bad.
703 * R9 contains the saved CR, r13 points to the paca,
704 * r10 contains the (bad) kernel stack pointer,
705 * r11 and r12 contain the saved SRR0 and SRR1.
David Gibson91a57fc2005-08-19 14:52:32 +1000706 * We switch to using an emergency stack, save the registers there,
707 * and call kernel_bad_stack(), which panics.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700708 */
709bad_stack:
710 ld r1,PACAEMERGSP(r13)
711 subi r1,r1,64+INT_FRAME_SIZE
712 std r9,_CCR(r1)
713 std r10,GPR1(r1)
714 std r11,_NIP(r1)
715 std r12,_MSR(r1)
716 mfspr r11,DAR
717 mfspr r12,DSISR
718 std r11,_DAR(r1)
719 std r12,_DSISR(r1)
720 mflr r10
721 mfctr r11
722 mfxer r12
723 std r10,_LINK(r1)
724 std r11,_CTR(r1)
725 std r12,_XER(r1)
726 SAVE_GPR(0,r1)
727 SAVE_GPR(2,r1)
728 SAVE_4GPRS(3,r1)
729 SAVE_2GPRS(7,r1)
730 SAVE_10GPRS(12,r1)
731 SAVE_10GPRS(22,r1)
732 addi r11,r1,INT_FRAME_SIZE
733 std r11,0(r1)
734 li r12,0
735 std r12,0(r11)
736 ld r2,PACATOC(r13)
7371: addi r3,r1,STACK_FRAME_OVERHEAD
738 bl .kernel_bad_stack
739 b 1b
740
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	/* Clear RI so a recursive exception here can't clobber
	 * SRR0/SRR1 before we rfid. */
	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
781
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)	/* It wont fit in the 0x300 handler */
	/* Stash DAR/DSISR before the prolog can clobber them. */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820		/* fault bits from SRR1 */
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* FP use in the kernel is a bug */
859
/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 *
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_math@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r6,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP		/* r12 holds the frame's saved MSR */
	or	r12,r12,r4
	std	r12,_MSR(r1)
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
915
	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	/* Kernel-mode (or no-Altivec) case: report the exception. */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except
930
#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altvec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h	/* r12 holds the frame's saved MSR */
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1004
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005/*
1006 * Hash table stuff
1007 */
1008 .align 7
/*
 * On entry: r3 = faulting address (DAR), r4 = DSISR, r5 = trap number,
 * r12 = saved SRR1, r1 = exception frame (set up by the 0x300/0x400
 * exception paths).
 */
1009_GLOBAL(do_hash_page)
1010 std r3,_DAR(r1)
1011 std r4,_DSISR(r1)
1012
1013 andis. r0,r4,0xa450 /* weird error? */
1014 bne- .handle_page_fault /* if not, try to insert a HPTE */
1015BEGIN_FTR_SECTION
1016 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1017 bne- .do_ste_alloc /* If so handle it */
1018END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1019
1020 /*
1021 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1022 * accessing a userspace segment (even from the kernel). We assume
1023 * kernel addresses always have the high bit set.
1024 */
1025 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1026 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1027 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1028 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1029 ori r4,r4,1 /* add _PAGE_PRESENT */
1030 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
1031
1032 /*
1033 * On iSeries, we soft-disable interrupts here, then
1034 * hard-enable interrupts so that the hash_page code can spin on
1035 * the hash_table_lock without problems on a shared processor.
1036 */
1037 DISABLE_INTS
1038
1039 /*
1040 * r3 contains the faulting address
1041 * r4 contains the required access permissions
1042 * r5 contains the trap number
1043 *
1044 * at return r3 = 0 for success
1045 */
1046 bl .hash_page /* build HPTE if possible */
1047 cmpdi r3,0 /* see if hash_page succeeded */
1048
1049#ifdef DO_SOFT_DISABLE
1050 /*
1051 * If we had interrupts soft-enabled at the point where the
1052 * DSI/ISI occurred, and an interrupt came in during hash_page,
1053 * handle it now.
1054 * We jump to ret_from_except_lite rather than fast_exception_return
1055 * because ret_from_except_lite will check for and handle pending
1056 * interrupts if necessary.
1057 */
1058 beq .ret_from_except_lite
1059 /* For a hash failure, we don't bother re-enabling interrupts */
1060 ble- 12f
1061
1062 /*
1063 * hash_page couldn't handle it, set soft interrupt enable back
1064 * to what it was before the trap. Note that .local_irq_restore
1065 * handles any interrupts pending at this point.
1066 */
1067 ld r3,SOFTE(r1)
1068 bl .local_irq_restore
1069 b 11f
1070#else
1071 beq fast_exception_return /* Return from exception on success */
1072 ble- 12f /* Failure return from hash_page */
1073
1074 /* fall through */
1075#endif
1076
1077/* Here we have a page fault that hash_page can't handle. */
1078_GLOBAL(handle_page_fault)
1079 ENABLE_INTS
108011: ld r4,_DAR(r1)
1081 ld r5,_DSISR(r1)
1082 addi r3,r1,STACK_FRAME_OVERHEAD
1083 bl .do_page_fault
1084 cmpdi r3,0
1085 beq+ .ret_from_except_lite
1086 bl .save_nvgprs
1087 mr r5,r3
1088 addi r3,r1,STACK_FRAME_OVERHEAD
1089 lwz r4,_DAR(r1) /* NOTE(review): _DAR was stored with std; lwz loads only 32 bits -- confirm ld isn't wanted */
1090 bl .bad_page_fault
1091 b .ret_from_except
1092
1093/* We have a page fault that hash_page could handle but HV refused
1094 * the PTE insertion
1095 */
109612: bl .save_nvgprs
1097 addi r3,r1,STACK_FRAME_OVERHEAD
1098 lwz r4,_DAR(r1) /* NOTE(review): same 32-bit load of the 64-bit _DAR as above */
1099 bl .low_hash_fault
1100 b .ret_from_except
1101
1102 /* here we have a segment miss */
1103_GLOBAL(do_ste_alloc)
1104 bl .ste_allocate /* try to insert stab entry */
1105 cmpdi r3,0
1106 beq+ fast_exception_return
1107 b .handle_page_fault
1108
1109/*
1110 * r13 points to the PACA, r9 contains the saved CR,
1111 * r11 and r12 contain the saved SRR0 and SRR1.
1112 * r9 - r13 are saved in paca->exslb.
1113 * We assume we aren't going to take any exceptions during this procedure.
1114 * We assume (DAR >> 60) == 0xc.
 * Inserts a bolted segment-table entry for a kernel address, casting
 * out a random entry from the primary group if it is full.
1115 */
1116 .align 7
1117_GLOBAL(do_stab_bolted)
1118 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1119 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1120
1121 /* Hash to the primary group */
1122 ld r10,PACASTABVIRT(r13)
1123 mfspr r11,DAR
1124 srdi r11,r11,28 /* r11 = ESID of the faulting address */
1125 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1126
1127 /* Calculate VSID */
1128 /* This is a kernel address, so protovsid = ESID */
1129 ASM_VSID_SCRAMBLE(r11, r9)
1130 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1131
1132 /* Search the primary group for a free entry */
11331: ld r11,0(r10) /* Test valid bit of the current ste */
1134 andi. r11,r11,0x80
1135 beq 2f
1136 addi r10,r10,16 /* next ste (16 bytes each) */
1137 andi. r11,r10,0x70 /* wrapped past the 8-entry group? */
1138 bne 1b
1139
1140 /* Stick for only searching the primary group for now. */
1141 /* At least for now, we use a very simple random castout scheme */
1142 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1143 mftb r11
1144 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1145 ori r11,r11,0x10
1146
1147 /* r10 currently points to an ste one past the group of interest */
1148 /* make it point to the randomly selected entry */
1149 subi r10,r10,128
1150 or r10,r10,r11 /* r10 is the entry to invalidate */
1151
1152 isync /* mark the entry invalid */
1153 ld r11,0(r10)
1154 rldicl r11,r11,56,1 /* clear the valid bit */
1155 rotldi r11,r11,8
1156 std r11,0(r10)
1157 sync
1158
1159 clrrdi r11,r11,28 /* Get the esid part of the ste */
1160 slbie r11
1161
11622: std r9,8(r10) /* Store the vsid part of the ste */
1163 eieio
1164
1165 mfspr r11,DAR /* Get the new esid */
1166 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1167 ori r11,r11,0x90 /* Turn on valid and kp */
1168 std r11,0(r10) /* Put new entry back into the stab */
1169
1170 sync
1171
1172 /* All done -- return from exception. */
1173 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1174 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1175
1176 andi. r10,r12,MSR_RI
1177 beq- unrecov_slb
1178
1179 mtcrf 0x80,r9 /* restore CR */
1180
1181 mfmsr r10
1182 clrrdi r10,r10,2 /* clear RI (bottom MSR bits): SRR0/1 must survive until rfid */
1183 mtmsrd r10,1
1184
1185 mtspr SRR0,r11
1186 mtspr SRR1,r12
1187 ld r9,PACA_EXSLB+EX_R9(r13)
1188 ld r10,PACA_EXSLB+EX_R10(r13)
1189 ld r11,PACA_EXSLB+EX_R11(r13)
1190 ld r12,PACA_EXSLB+EX_R12(r13)
1191 ld r13,PACA_EXSLB+EX_R13(r13)
1192 rfid
1193 b . /* prevent speculative execution */
1194
1195/*
1196 * r13 points to the PACA, r9 contains the saved CR,
1197 * r11 and r12 contain the saved SRR0 and SRR1.
1198 * r3 has the faulting address
1199 * r9 - r13 are saved in paca->exslb.
1200 * r3 is saved in paca->slb_r3
1201 * We assume we aren't going to take any exceptions during this procedure.
 * Calls slb_allocate() to create the missing SLB entry, then returns
 * straight to the interrupted context.
1202 */
1203_GLOBAL(do_slb_miss)
1204 mflr r10
1205
1206 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1207 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1208
1209 bl .slb_allocate /* handle it */
1210
1211 /* All done -- return from exception. */
1212
1213 ld r10,PACA_EXSLB+EX_LR(r13)
1214 ld r3,PACA_EXSLB+EX_R3(r13)
1215 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1216#ifdef CONFIG_PPC_ISERIES
1217 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1218#endif /* CONFIG_PPC_ISERIES */
1219
1220 mtlr r10
1221
1222 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1223 beq- unrecov_slb
1224
1225.machine push
1226.machine "power4"
1227 mtcrf 0x80,r9
1228 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1229.machine pop
1230
1231#ifdef CONFIG_PPC_ISERIES
/* On iSeries the hypervisor saved SRR0/1 in the lppaca, so reload them
 * before rfid; elsewhere SRR0/1 still hold the original exception state. */
1232 mtspr SRR0,r11
1233 mtspr SRR1,r12
1234#endif /* CONFIG_PPC_ISERIES */
1235 ld r9,PACA_EXSLB+EX_R9(r13)
1236 ld r10,PACA_EXSLB+EX_R10(r13)
1237 ld r11,PACA_EXSLB+EX_R11(r13)
1238 ld r12,PACA_EXSLB+EX_R12(r13)
1239 ld r13,PACA_EXSLB+EX_R13(r13)
1240 rfid
1241 b . /* prevent speculative execution */
1242
1243unrecov_slb:
/* Unrecoverable SLB/STAB miss (the saved MSR had RI clear): build a
 * full exception frame and report it; .unrecoverable_exception is not
 * expected to return -- the 1b loop is a safety net. */
1244 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1245 DISABLE_INTS
1246 bl .save_nvgprs
12471: addi r3,r1,STACK_FRAME_OVERHEAD
1248 bl .unrecoverable_exception
1249 b 1b
1250
David Gibsonec465512005-08-19 14:52:31 +10001251/*
David Gibsonc59c4642005-08-19 14:52:31 +10001252 * Space for CPU0's segment table.
1253 *
1254 * On iSeries, the hypervisor must fill in at least one entry before
1255 * we get control (with relocate on). The address is give to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT).
1259 */
1260 . = STAB0_PHYS_ADDR /* 0x6000 */
1261 .globl initial_stab
1262initial_stab:
1263 .space 4096
1264
1265/*
David Gibsonec465512005-08-19 14:52:31 +10001266 * Data area reserved for FWNMI option.
1267 * This address (0x7000) is fixed by the RPA.
1268 */
1269 .= 0x7000 /* location-counter assignment, same as ". = 0x7000" above */
1270 .globl fwnmi_data_area
1271fwnmi_data_area:
1272 .space PAGE_SIZE
1273
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274/*
1275 * On pSeries, secondary processors spin in the following code.
1276 * At entry, r3 = this processor's number (physical cpu id)
1277 */
1278_GLOBAL(pSeries_secondary_smp_init)
1279 mr r24,r3 /* r24 = physical cpu id */
1280
1281 /* turn on 64-bit mode */
1282 bl .enable_64b_mode
1283 isync
1284
1285 /* Copy some CPU settings from CPU 0 */
1286 bl .__restore_cpu_setup
1287
1288 /* Set up a paca value for this processor. Since we have the
R Sharadafce0d572005-06-25 14:58:10 -07001289 * physical cpu id in r24, we need to search the pacas to find
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 * which logical id maps to our physical one.
1291 */
1292 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1293 li r5,0 /* logical cpu id */
12941: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1295 cmpw r6,r24 /* Compare to our id */
1296 beq 2f
1297 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1298 addi r5,r5,1
1299 cmpwi r5,NR_CPUS
1300 blt 1b
1301
R Sharadafce0d572005-06-25 14:58:10 -07001302 mr r3,r24 /* not found, copy phys to r3 */
1303 b .kexec_wait /* next kernel might do better */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304
13052: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
David Gibson91a57fc2005-08-19 14:52:32 +10001306 /* From now on, r24 is expected to be logical cpuid */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 mr r24,r5
13083: HMT_LOW /* spin at low SMT priority until told to start */
1309 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1310 /* start. */
1311 sync
1312
1313 /* Create a temp kernel stack for use before relocation is on. */
1314 ld r1,PACAEMERGSP(r13)
1315 subi r1,r1,STACK_FRAME_OVERHEAD
1316
1317 cmpwi 0,r23,0
1318#ifdef CONFIG_SMP
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 bne .__secondary_start
1320#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 b 3b /* Loop until told to go */
1322
1323#ifdef CONFIG_PPC_ISERIES
/* iSeries kernel entry: clear the BSS, set up the initial stack and
 * TOC, identify the CPU, run iSeries early setup, then join the
 * common startup path. */
1324_STATIC(__start_initialization_iSeries)
1325 /* Clear out the BSS */
1326 LOADADDR(r11,__bss_stop)
1327 LOADADDR(r8,__bss_start)
1328 sub r11,r11,r8 /* bss size */
1329 addi r11,r11,7 /* round up to an even double word */
1330 rldicl. r11,r11,61,3 /* shift right by 3 */
1331 beq 4f
1332 addi r8,r8,-8
1333 li r0,0
1334 mtctr r11 /* zero this many doublewords */
13353: stdu r0,8(r8)
1336 bdnz 3b
13374:
1338 LOADADDR(r1,init_thread_union)
1339 addi r1,r1,THREAD_SIZE
1340 li r0,0
1341 stdu r0,-STACK_FRAME_OVERHEAD(r1) /* terminate the back-chain */
1342
1343 LOADADDR(r3,cpu_specs)
1344 LOADADDR(r4,cur_cpu_spec)
1345 li r5,0
1346 bl .identify_cpu
1347
1348 LOADADDR(r2,__toc_start)
1349 addi r2,r2,0x4000
1350 addi r2,r2,0x4000 /* r2 = __toc_start + 0x8000 (TOC base) */
1351
1352 bl .iSeries_early_setup
1353
1354 /* relocation is on at this point */
1355
1356 b .start_here_common
1357#endif /* CONFIG_PPC_ISERIES */
1358
1359#ifdef CONFIG_PPC_MULTIPLATFORM
1360
/*
 * __mmu_off: turn address translation (MSR_IR/MSR_DR) off and resume
 * execution at the real-mode address in r4. Returns via blr without
 * jumping if translation is already off. Clobbers r0 and r3.
 */
1361_STATIC(__mmu_off)
1362 mfmsr r3
1363 andi. r0,r3,MSR_IR|MSR_DR
1364 beqlr /* MMU already off: just return */
1365 andc r3,r3,r0 /* clear IR/DR in the new MSR */
1366 mtspr SPRN_SRR0,r4
1367 mtspr SPRN_SRR1,r3
1368 sync
1369 rfid
1370 b . /* prevent speculative execution */
1371
1372
1373/*
1374 * Here is our main kernel entry point. We support currently 2 kind of entries
1375 * depending on the value of r5.
1376 *
1377 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1378 * in r3...r7
1379 *
1380 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1381 * DT block, r4 is a physical pointer to the kernel itself
1382 *
1383 */
1384_GLOBAL(__start_initialization_multiplatform)
1385 /*
1386 * Are we booted from a PROM Of-type client-interface ?
1387 */
1388 cmpldi cr0,r5,0
1389 bne .__boot_from_prom /* yes -> prom */
1390
1391 /* Save parameters */
1392 mr r31,r3 /* r31 = device-tree block (phys) */
1393 mr r30,r4 /* r30 = kernel load address (phys) */
1394
1395 /* Make sure we are running in 64 bits mode */
1396 bl .enable_64b_mode
1397
1398 /* Setup some critical 970 SPRs before switching MMU off */
1399 bl .__970_cpu_preinit
1400
1401 /* cpu # */
1402 li r24,0
1403
1404 /* Switch off MMU if not already */
1405 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1406 add r4,r4,r30 /* real address of __after_prom_start */
1407 bl .__mmu_off
1408 b .__after_prom_start
1409
1410_STATIC(__boot_from_prom)
1411 /* Save parameters (the r3..r7 OF client-interface arguments) */
1412 mr r31,r3
1413 mr r30,r4
1414 mr r29,r5
1415 mr r28,r6
1416 mr r27,r7
1417
1418 /* Make sure we are running in 64 bits mode */
1419 bl .enable_64b_mode
1420
1421 /* put a relocation offset into r3 */
1422 bl .reloc_offset
1423
1424 LOADADDR(r2,__toc_start)
1425 addi r2,r2,0x4000
1426 addi r2,r2,0x4000 /* r2 = __toc_start + 0x8000 (TOC base) */
1427
1428 /* Relocate the TOC from a virt addr to a real addr */
1429 sub r2,r2,r3
1430
1431 /* Restore parameters */
1432 mr r3,r31
1433 mr r4,r30
1434 mr r5,r29
1435 mr r6,r28
1436 mr r7,r27
1437
1438 /* Do all of the interaction with OF client interface */
1439 bl .prom_init
1440 /* We never return */
1441 trap
1442
1443/*
1444 * At this point, r3 contains the physical address we are running at,
1445 * returned by prom_init()
1446 */
1447_STATIC(__after_prom_start)
1448
1449/*
1450 * We need to run with __start at physical address 0.
1451 * This will leave some code in the first 256B of
1452 * real memory, which are reserved for software use.
1453 * The remainder of the first page is loaded with the fixed
1454 * interrupt vectors. The next two pages are filled with
1455 * unknown exception placeholders.
1456 *
1457 * Note: This process overwrites the OF exception vectors.
1458 * r26 == relocation offset
1459 * r27 == KERNELBASE
1460 */
1461 bl .reloc_offset
1462 mr r26,r3
1463 SET_REG_TO_CONST(r27,KERNELBASE)
1464
1465 li r3,0 /* target addr */
1466
1467 // XXX FIXME: Use phys returned by OF (r30)
1468 sub r4,r27,r26 /* source addr */
1469 /* current address of _start */
1470 /* i.e. where we are running */
1471 /* the source addr */
1472
1473 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1474 sub r5,r5,r27
1475
1476 li r6,0x100 /* Start offset, the first 0x100 */
1477 /* bytes were copied earlier. */
1478
1479 bl .copy_and_flush /* copy the first n bytes */
1480 /* this includes the code being */
1481 /* executed here. */
1482
/* Jump into the relocated copy of this code at physical 0, then copy
 * the remainder of the kernel (up to klimit). */
1483 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1484 mtctr r0 /* that we just made/relocated */
1485 bctr
1486
14874: LOADADDR(r5,klimit)
1488 sub r5,r5,r26
1489 ld r5,0(r5) /* get the value of klimit */
1490 sub r5,r5,r27
1491 bl .copy_and_flush /* copy the rest */
1492 b .start_here_multiplatform
1493
1494#endif /* CONFIG_PPC_MULTIPLATFORM */
1495
1496/*
1497 * Copy routine used to copy the kernel to start at physical address 0
1498 * and flush and invalidate the caches as needed.
1499 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1500 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1501 *
1502 * Note: this routine *only* clobbers r0, r6 and lr
1503 */
1504_GLOBAL(copy_and_flush)
1505 addi r5,r5,-8
1506 addi r6,r6,-8
15074: li r0,16 /* Use the least common */
1508 /* denominator cache line */
1509 /* size. This results in */
1510 /* extra cache line flushes */
1511 /* but operation is correct. */
1512 /* Can't get cache line size */
1513 /* from NACA as it is being */
1514 /* moved too. */
1515
1516 mtctr r0 /* put # words/line in ctr */
15173: addi r6,r6,8 /* copy a cache line */
1518 ldx r0,r6,r4
1519 stdx r0,r6,r3 /* 16 doublewords == 128 bytes per pass */
1520 bdnz 3b
1521 dcbst r6,r3 /* write it to memory */
1522 sync
1523 icbi r6,r3 /* flush the icache line */
1524 cmpld 0,r6,r5
1525 blt 4b
1526 sync
1527 addi r5,r5,8 /* restore caller's r5 */
1528 addi r6,r6,8
1529 blr
1530
1531.align 8
1532copy_to_here:
1533
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534#ifdef CONFIG_SMP
1535#ifdef CONFIG_PPC_PMAC
1536/*
1537 * On PowerMac, secondary processors starts from the reset vector, which
1538 * is temporarily turned into a call to one of the functions below.
 * Each stub loads its cpu number into r24 then joins the common path.
1539 */
1540 .section ".text";
1541 .align 2 ;
1542
1543 .globl pmac_secondary_start_1
1544pmac_secondary_start_1:
1545 li r24, 1 /* r24 = cpu number */
1546 b .pmac_secondary_start
1547
1548 .globl pmac_secondary_start_2
1549pmac_secondary_start_2:
1550 li r24, 2
1551 b .pmac_secondary_start
1552
1553 .globl pmac_secondary_start_3
1554pmac_secondary_start_3:
1555 li r24, 3
1556 b .pmac_secondary_start
1557
1558_GLOBAL(pmac_secondary_start)
1559 /* turn on 64-bit mode */
1560 bl .enable_64b_mode
1561 isync
1562
1563 /* Copy some CPU settings from CPU 0 */
1564 bl .__restore_cpu_setup
1565
1566 /* pSeries do that early though I don't think we really need it */
1567 mfmsr r3
1568 ori r3,r3,MSR_RI
1569 mtmsrd r3 /* RI on */
1570
1571 /* Set up a paca value for this processor. */
1572 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1573 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1574 add r13,r13,r4 /* for this processor. */
1575 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1576
1577 /* Create a temp kernel stack for use before relocation is on. */
1578 ld r1,PACAEMERGSP(r13)
1579 subi r1,r1,STACK_FRAME_OVERHEAD
1580
1581 b .__secondary_start
1582
1583#endif /* CONFIG_PPC_PMAC */
1584
1585/*
1586 * This function is called after the master CPU has released the
1587 * secondary processors. The execution environment is relocation off.
1588 * The paca for this processor has the following fields initialized at
1589 * this point:
1590 * 1. Processor number
1591 * 2. Segment table pointer (virtual address)
1592 * On entry the following are set:
1593 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1594 * r24 = cpu# (in Linux terms)
1595 * r13 = paca virtual address
1596 * SPRG3 = paca virtual address
1597 */
1598_GLOBAL(__secondary_start)
1599
1600 HMT_MEDIUM /* Set thread priority to MEDIUM */
1601
1602 ld r2,PACATOC(r13)
1603 li r6,0
1604 stb r6,PACAPROCENABLED(r13) /* interrupts soft-disabled for now */
1605
1606#ifndef CONFIG_PPC_ISERIES
1607 /* Initialize the page table pointer register. */
1608 LOADADDR(r6,_SDR1)
1609 ld r6,0(r6) /* get the value of _SDR1 */
1610 mtspr SDR1,r6 /* set the htab location */
1611#endif
1612 /* Initialize the first segment table (or SLB) entry */
1613 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1614 bl .stab_initialize
1615
1616 /* Initialize the kernel stack. Just a repeat for iSeries. */
1617 LOADADDR(r3,current_set)
1618 sldi r28,r24,3 /* get current_set[cpu#] */
1619 ldx r1,r3,r28
1620 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1621 std r1,PACAKSAVE(r13)
1622
1623 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1624 ori r4,r3,1 /* turn on valid bit */
1625
1626#ifdef CONFIG_PPC_ISERIES
1627 li r0,-1 /* hypervisor call */
1628 li r3,1
1629 sldi r3,r3,63 /* 0x8000000000000000 */
1630 ori r3,r3,4 /* 0x8000000000000004 */
1631 sc /* HvCall_setASR */
1632#else
1633 /* set the ASR */
1634 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1635 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1636 cmpldi r3,PLATFORM_PSERIES_LPAR
1637 bne 98f
1638 mfspr r3,PVR
1639 srwi r3,r3,16
1640 cmpwi r3,0x37 /* SStar */
1641 beq 97f
1642 cmpwi r3,0x36 /* IStar */
1643 beq 97f
1644 cmpwi r3,0x34 /* Pulsar */
1645 bne 98f
164697: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1647 HVSC /* Invoking hcall */
1648 b 99f
164998: /* !(rpa hypervisor) || !(star) */
1650 mtasr r4 /* set the stab location */
165199:
1652#endif
1653 li r7,0
1654 mtlr r7 /* zero LR: nothing to return to */
1655
1656 /* enable MMU and jump to start_secondary */
1657 LOADADDR(r3,.start_secondary_prolog)
1658 SET_REG_TO_CONST(r4, MSR_KERNEL)
1659#ifdef DO_SOFT_DISABLE
1660 ori r4,r4,MSR_EE
1661#endif
1662 mtspr SRR0,r3
1663 mtspr SRR1,r4
1664 rfid /* turn relocation on and continue at start_secondary_prolog */
1665 b . /* prevent speculative execution */
1666
1667/*
1668 * Running with relocation on at this point. All we want to do is
1669 * zero the stack back-chain pointer before going into C code.
1670 */
1671_GLOBAL(start_secondary_prolog)
1672 li r3,0
1673 std r3,0(r1) /* Zero the stack frame pointer */
1674 bl .start_secondary
1675#endif
1676
1677/*
1678 * This subroutine clobbers r11 and r12
 * Sets the MSR_SF and MSR_ISF bits to put the CPU into 64-bit mode.
1679 */
1680_GLOBAL(enable_64b_mode)
1681 mfmsr r11 /* grab the current MSR */
1682 li r12,1
1683 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) /* r12 = MSR_SF */
1684 or r11,r11,r12
1685 li r12,1
1686 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) /* r12 = MSR_ISF */
1687 or r11,r11,r12
1688 mtmsrd r11
1689 isync
1690 blr
1691
1692#ifdef CONFIG_PPC_MULTIPLATFORM
1693/*
1694 * This is where the main kernel code starts.
 * Entered in real mode; r31 still holds the device-tree pointer saved
 * by __start_initialization_multiplatform / __boot_from_prom.
1695 */
1696_STATIC(start_here_multiplatform)
1697 /* get a new offset, now that the kernel has moved. */
1698 bl .reloc_offset
1699 mr r26,r3 /* r26 = relocation offset */
1700
1701 /* Clear out the BSS. It may have been done in prom_init,
1702 * already but that's irrelevant since prom_init will soon
1703 * be detached from the kernel completely. Besides, we need
1704 * to clear it now for kexec-style entry.
1705 */
1706 LOADADDR(r11,__bss_stop)
1707 LOADADDR(r8,__bss_start)
1708 sub r11,r11,r8 /* bss size */
1709 addi r11,r11,7 /* round up to an even double word */
1710 rldicl. r11,r11,61,3 /* shift right by 3 */
1711 beq 4f
1712 addi r8,r8,-8
1713 li r0,0
1714 mtctr r11 /* zero this many doublewords */
17153: stdu r0,8(r8)
1716 bdnz 3b
17174:
1718
1719 mfmsr r6
1720 ori r6,r6,MSR_RI
1721 mtmsrd r6 /* RI on */
1722
1723#ifdef CONFIG_HMT
1724 /* Start up the second thread on cpu 0 */
1725 mfspr r3,PVR
1726 srwi r3,r3,16
1727 cmpwi r3,0x34 /* Pulsar */
1728 beq 90f
1729 cmpwi r3,0x36 /* Icestar */
1730 beq 90f
1731 cmpwi r3,0x37 /* SStar */
1732 beq 90f
1733 b 91f /* HMT not supported */
173490: li r3,0
1735 bl .hmt_start_secondary
173691:
1737#endif
1738
1739 /* The following gets the stack and TOC set up with the regs */
1740 /* pointing to the real addr of the kernel stack. This is */
1741 /* all done to support the C function call below which sets */
1742 /* up the htab. This is done because we have relocated the */
1743 /* kernel but are still running in real mode. */
1744
1745 LOADADDR(r3,init_thread_union)
1746 sub r3,r3,r26 /* virt -> phys via reloc offset */
1747
1748 /* set up a stack pointer (physical address) */
1749 addi r1,r3,THREAD_SIZE
1750 li r0,0
1751 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1752
1753 /* set up the TOC (physical address) */
1754 LOADADDR(r2,__toc_start)
1755 addi r2,r2,0x4000
1756 addi r2,r2,0x4000 /* r2 = __toc_start + 0x8000 (TOC base) */
1757 sub r2,r2,r26
1758
1759 LOADADDR(r3,cpu_specs)
1760 sub r3,r3,r26
1761 LOADADDR(r4,cur_cpu_spec)
1762 sub r4,r4,r26
1763 mr r5,r26
1764 bl .identify_cpu
1765
1766 /* Save some low level config HIDs of CPU0 to be copied to
1767 * other CPUs later on, or used for suspend/resume
1768 */
1769 bl .__save_cpu_setup
1770 sync
1771
1772 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1773 * note that boot_cpuid can always be 0 nowadays since there is
1774 * nowhere it can be initialized differently before we reach this
1775 * code
1776 */
1777 LOADADDR(r27, boot_cpuid)
1778 sub r27,r27,r26
1779 lwz r27,0(r27)
1780
1781 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1782 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1783 add r13,r13,r24 /* for this processor. */
1784 sub r13,r13,r26 /* convert to physical addr */
1785 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */
1786
1787 /* Do very early kernel initializations, including initial hash table,
1788 * stab and slb setup before we turn on relocation. */
1789
1790 /* Restore parameters passed from prom_init/kexec */
1791 mr r3,r31
1792 bl .early_setup
1793
1794 /* set the ASR */
1795 ld r3,PACASTABREAL(r13)
1796 ori r4,r3,1 /* turn on valid bit */
1797 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1798 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1799 cmpldi r3,PLATFORM_PSERIES_LPAR
1800 bne 98f
1801 mfspr r3,PVR
1802 srwi r3,r3,16
1803 cmpwi r3,0x37 /* SStar */
1804 beq 97f
1805 cmpwi r3,0x36 /* IStar */
1806 beq 97f
1807 cmpwi r3,0x34 /* Pulsar */
1808 bne 98f
180997: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1810 HVSC /* Invoking hcall */
1811 b 99f
181298: /* !(rpa hypervisor) || !(star) */
1813 mtasr r4 /* set the stab location */
181499:
1815 /* Set SDR1 (hash table pointer) */
1816 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1817 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1818 /* Test if bit 0 is set (LPAR bit) */
1819 andi. r3,r3,0x1
1820 bne 98f
1821 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1822 sub r6,r6,r26
1823 ld r6,0(r6) /* get the value of _SDR1 */
1824 mtspr SDR1,r6 /* set the htab location */
182598:
1826 LOADADDR(r3,.start_here_common)
1827 SET_REG_TO_CONST(r4, MSR_KERNEL)
1828 mtspr SRR0,r3
1829 mtspr SRR1,r4
1830 rfid /* turn relocation on and continue at start_here_common */
1831 b . /* prevent speculative execution */
1832#endif /* CONFIG_PPC_MULTIPLATFORM */
1833
1834 /* This is where all platforms converge execution */
1835_STATIC(start_here_common)
1836 /* relocation is on at this point */
1837
1838 /* The following code sets up the SP and TOC now that we are */
1839 /* running with translation enabled. */
1840
1841 LOADADDR(r3,init_thread_union)
1842
1843 /* set up the stack */
1844 addi r1,r3,THREAD_SIZE
1845 li r0,0
1846 stdu r0,-STACK_FRAME_OVERHEAD(r1) /* terminate the back-chain */
1847
1848 /* Apply the CPUs-specific fixups (nop out sections not relevant
1849 * to this CPU
1850 */
1851 li r3,0
1852 bl .do_cpu_ftr_fixups
1853
1854 LOADADDR(r26, boot_cpuid)
1855 lwz r26,0(r26)
1856
1857 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1858 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1859 add r13,r13,r24 /* for this processor. */
1860 mtspr SPRG3,r13 /* now holds the paca's virtual address */
1861
1862 /* ptr to current */
1863 LOADADDR(r4,init_task)
1864 std r4,PACACURRENT(r13)
1865
1866 /* Load the TOC */
1867 ld r2,PACATOC(r13)
1868 std r1,PACAKSAVE(r13)
1869
1870 bl .setup_system
1871
1872 /* Load up the kernel context */
18735:
1874#ifdef DO_SOFT_DISABLE
1875 li r5,0
1876 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1877 mfmsr r5
1878 ori r5,r5,MSR_EE /* Hard Enabled */
1879 mtmsrd r5
1880#endif
1881
1882 bl .start_kernel /* not expected to return */
1883
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884_GLOBAL(hmt_init)
1885#ifdef CONFIG_HMT
/* On HMT-capable CPUs (Pulsar/Icestar/SStar), record this thread's
 * PIR-derived id in hmt_thread_data[r24] and start the second thread;
 * then fall through to pSeries_secondary_smp_init with r3 = cpu id. */
1886 LOADADDR(r5, hmt_thread_data)
1887 mfspr r7,PVR
1888 srwi r7,r7,16
1889 cmpwi r7,0x34 /* Pulsar */
1890 beq 90f
1891 cmpwi r7,0x36 /* Icestar */
1892 beq 91f
1893 cmpwi r7,0x37 /* SStar */
1894 beq 91f
1895 b 101f /* not an HMT cpu: skip straight to secondary init */
189690: mfspr r6,PIR
1897 andi. r6,r6,0x1f
1898 b 92f
189991: mfspr r6,PIR
1900 andi. r6,r6,0x3ff
190192: sldi r4,r24,3
1902 stwx r6,r5,r4
1903 bl .hmt_start_secondary
1904 b 101f
1905
1906__hmt_secondary_hold:
/* Entry point for the woken second thread: scan hmt_thread_data[] for
 * this thread's PIR value and pick up the matching logical cpu number
 * into r24. */
1907 LOADADDR(r5, hmt_thread_data)
1908 clrldi r5,r5,4 /* clear top 4 bits (virt -> real address) */
1909 li r7,0
1910 mfspr r6,PIR
1911 mfspr r8,PVR
1912 srwi r8,r8,16
1913 cmpwi r8,0x34
1914 bne 93f
1915 andi. r6,r6,0x1f
1916 b 103f
191793: andi. r6,r6,0x3f
1918
1919103: lwzx r8,r5,r7
1920 cmpw r8,r6
1921 beq 104f
1922 addi r7,r7,8 /* next 8-byte table slot */
1923 b 103b
1924
1925104: addi r7,r7,4
1926 lwzx r9,r5,r7
1927 mr r24,r9
1928101:
1929#endif
1930 mr r3,r24
1931 b .pSeries_secondary_smp_init
1932
1933#ifdef CONFIG_HMT
1934_GLOBAL(hmt_start_secondary)
/* Program the dormant-thread SPRs (NIADORM/MSRDORM/TSC/TST/HID0) so
 * the second hardware thread starts at __hmt_secondary_hold. */
1935 LOADADDR(r4,__hmt_secondary_hold)
1936 clrldi r4,r4,4 /* clear top 4 bits (virt -> real address) */
1937 mtspr NIADORM, r4
1938 mfspr r4, MSRDORM
1939 li r5, -65
1940 and r4, r4, r5
1941 mtspr MSRDORM, r4
1942 lis r4,0xffef
1943 ori r4,r4,0x7403
1944 mtspr TSC, r4
1945 li r4,0x1f4
1946 mtspr TST, r4
1947 mfspr r4, HID0
1948 ori r4, r4, 0x1
1949 mtspr HID0, r4
Anton Blanchard6dc2f0c2005-06-02 14:02:02 -07001950 mfspr r4, SPRN_CTRLF
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 oris r4, r4, 0x40
Anton Blanchard6dc2f0c2005-06-02 14:02:02 -07001952 mtspr SPRN_CTRLT, r4
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 blr
1954#endif
1955
Olof Johansson75eedfe2005-08-04 12:53:29 -07001956#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957_GLOBAL(smp_release_cpus)
1958 /* All secondary cpus are spinning on a common
1959 * spinloop, release them all now so they can start
1960 * to spin on their individual paca spinloops.
1961 * For non SMP kernels, the secondary cpus never
1962 * get out of the common spinloop.
1963 */
1964 li r3,1
1965 LOADADDR(r5,__secondary_hold_spinloop)
1966 std r3,0(r5)
1967 sync /* make the release store visible before returning */
1968 blr
1969#endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */
1970
1971
1972/*
1973 * We put a few things here that have to be page-aligned.
1974 * This stuff goes at the beginning of the data segment,
1975 * which is page-aligned.
1976 */
1977 .data
1978 .align 12 /* 2^12 = 4096-byte (page) alignment */
1979 .globl sdata
1980sdata:
1981 .globl empty_zero_page
1982empty_zero_page:
1983 .space 4096
1984
1985 .globl swapper_pg_dir
1986swapper_pg_dir:
1987 .space 4096
1988
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989/*
1990 * This space gets a copy of optional info passed to us by the bootstrap
1991 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1992 */
1993 .globl cmd_line
1994cmd_line:
1995 .space COMMAND_LINE_SIZE