1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#define SECONDARY_PROCESSORS
27
28#include <linux/config.h>
29#include <linux/threads.h>
30#include <asm/processor.h>
31#include <asm/page.h>
32#include <asm/mmu.h>
33#include <asm/naca.h>
34#include <asm/systemcfg.h>
35#include <asm/ppc_asm.h>
36#include <asm/offsets.h>
37#include <asm/bug.h>
38#include <asm/cputable.h>
39#include <asm/setup.h>
40#include <asm/hvcall.h>
41
42#ifdef CONFIG_PPC_ISERIES
43#define DO_SOFT_DISABLE
44#endif
45
46/*
47 * hcall interface to pSeries LPAR
48 */
49#define H_SET_ASR 0x30
50
51/*
52 * We lay out physical memory as follows:
53 * 0x0000 - 0x00ff : Secondary processor spin code
54 * 0x0100 - 0x2fff : pSeries Interrupt prologs
55 * 0x3000 - 0x3fff : Interrupt support
56 * 0x4000 - 0x4fff : NACA
57 * 0x6000 : iSeries and common interrupt prologs
58 * 0x9000 - 0x9fff : Initial segment table
59 */
60
61/*
62 * SPRG Usage
63 *
64 * Register Definition
65 *
66 * SPRG0 reserved for hypervisor
67 * SPRG1 temp - used to save gpr
68 * SPRG2 temp - used to save gpr
69 * SPRG3 virt addr of paca
70 */
71
72/*
73 * Entering into this code we make the following assumptions:
74 * For pSeries:
75 * 1. The MMU is off & open firmware is running in real mode.
76 * 2. The kernel is entered at __start
77 *
78 * For iSeries:
79 * 1. The MMU is on (as it always is for iSeries)
80 * 2. The kernel is entered at system_reset_iSeries
81 */
82
83 .text
84 .globl _stext
85_stext:
86#ifdef CONFIG_PPC_MULTIPLATFORM
87_GLOBAL(__start)
88 /* NOP this out unconditionally */
89BEGIN_FTR_SECTION
90 b .__start_initialization_multiplatform
91END_FTR_SECTION(0, 1)
92#endif /* CONFIG_PPC_MULTIPLATFORM */
93
94 /* Catch branch to 0 in real mode */
95 trap
96#ifdef CONFIG_PPC_ISERIES
97 /*
98 * At offset 0x20, there is a pointer to iSeries LPAR data.
99 * This is required by the hypervisor
100 */
101 . = 0x20
102 .llong hvReleaseData-KERNELBASE
103
104 /*
105 * At offset 0x28 and 0x30 are offsets to the msChunks
106 * array (used by the iSeries LPAR debugger to do translation
107 * between physical addresses and absolute addresses) and
108 * to the pidhash table (also used by the debugger)
109 */
110 .llong msChunks-KERNELBASE
111 .llong 0 /* pidhash-KERNELBASE SFRXXX */
112
113 /* Offset 0x38 - Pointer to start of embedded System.map */
114 .globl embedded_sysmap_start
115embedded_sysmap_start:
116 .llong 0
117 /* Offset 0x40 - Pointer to end of embedded System.map */
118 .globl embedded_sysmap_end
119embedded_sysmap_end:
120 .llong 0
121
122#else /* CONFIG_PPC_ISERIES */
123
124 /* Secondary processors spin on this value until it goes to 1. */
125 .globl __secondary_hold_spinloop
126__secondary_hold_spinloop:
127 .llong 0x0
128
129 /* Secondary processors write this value with their cpu # */
130 /* after they enter the spin loop immediately below. */
131 .globl __secondary_hold_acknowledge
132__secondary_hold_acknowledge:
133 .llong 0x0
134
135 . = 0x60
136/*
137 * The following code is used on pSeries to hold secondary processors
138 * in a spin loop after they have been freed from OpenFirmware, but
139 * before the bulk of the kernel has been relocated. This code
140 * is relocated to physical address 0x60 before prom_init is run.
141 * All of it must fit below the first exception vector at 0x100.
142 */
143_GLOBAL(__secondary_hold)
144 mfmsr r24
145 ori r24,r24,MSR_RI
146 mtmsrd r24 /* RI on */
147
148 /* Grab our linux cpu number */
149 mr r24,r3
150
151 /* Tell the master cpu we're here */
152 /* Relocation is off & we are located at an address less */
153 /* than 0x100, so we only need the low-order offset. */
154 std r24,__secondary_hold_acknowledge@l(0)
155 sync
156
157 /* All secondary cpus wait here until told to start. */
158100: ld r4,__secondary_hold_spinloop@l(0)
159 cmpdi 0,r4,1
160 bne 100b
161
162#ifdef CONFIG_HMT
163 b .hmt_init
164#else
165#ifdef CONFIG_SMP
166 mr r3,r24
167 b .pSeries_secondary_smp_init
168#else
169 BUG_OPCODE
170#endif
171#endif
172#endif
173
174/* This value is used to mark exception frames on the stack. */
175 .section ".toc","aw"
176exception_marker:
177 .tc ID_72656773_68657265[TC],0x7265677368657265
178 .text
179
180/*
181 * The following macros define the code that appears as
182 * the prologue to each of the exception handlers. They
183 * are split into two parts to allow a single kernel binary
184 * to be used for pSeries and iSeries.
185 * LOL. One day... - paulus
186 */
187
188/*
189 * We make as much of the exception code common between native
190 * exception handlers (including pSeries LPAR) and iSeries LPAR
191 * implementations as possible.
192 */
193
194/*
195 * This is the start of the interrupt handlers for pSeries
196 * This code runs with relocation off.
197 */
198#define EX_R9 0
199#define EX_R10 8
200#define EX_R11 16
201#define EX_R12 24
202#define EX_R13 32
203#define EX_SRR0 40
204#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
205#define EX_DAR 48
206#define EX_LR 48 /* SLB miss saves LR, but not DAR */
207#define EX_DSISR 56
208#define EX_CCR 60
209
210#define EXCEPTION_PROLOG_PSERIES(area, label) \
211 mfspr r13,SPRG3; /* get paca address into r13 */ \
212 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
213 std r10,area+EX_R10(r13); \
214 std r11,area+EX_R11(r13); \
215 std r12,area+EX_R12(r13); \
216 mfspr r9,SPRG1; \
217 std r9,area+EX_R13(r13); \
218 mfcr r9; \
219 clrrdi r12,r13,32; /* get high part of &label */ \
220 mfmsr r10; \
221 mfspr r11,SRR0; /* save SRR0 */ \
222 ori r12,r12,(label)@l; /* virt addr of handler */ \
223 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
224 mtspr SRR0,r12; \
225 mfspr r12,SRR1; /* and SRR1 */ \
226 mtspr SRR1,r10; \
227 rfid; \
228 b . /* prevent speculative execution */
229
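/*
 * In rough C-like pseudocode (an illustrative sketch, not the actual
 * generated code), EXCEPTION_PROLOG_PSERIES(area, label) does:
 *
 *	paca = (struct paca_struct *)SPRG3;
 *	paca->area.r9..r12 = r9..r12;	// free up some scratch registers
 *	paca->area.r13 = SPRG1;		// r13 was stashed there by the vector
 *	r9 = CR;  r11 = old SRR0;  r12 = old SRR1;  // carried into handler
 *	SRR0 = kernel_va_high_bits | label;	// virtual addr of handler
 *	SRR1 = MSR | MSR_IR | MSR_DR | MSR_RI;	// relocation on, recoverable
 *	rfid();			// continue at "label" with the MMU on
 *
 * i.e. the real-mode vector only touches the paca, then switches
 * relocation on and resumes at the virtual-mode common handler.
 */
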
230/*
231 * This is the start of the interrupt handlers for iSeries
232 * This code runs with relocation on.
233 */
234#define EXCEPTION_PROLOG_ISERIES_1(area) \
235 mfspr r13,SPRG3; /* get paca address into r13 */ \
236 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
237 std r10,area+EX_R10(r13); \
238 std r11,area+EX_R11(r13); \
239 std r12,area+EX_R12(r13); \
240 mfspr r9,SPRG1; \
241 std r9,area+EX_R13(r13); \
242 mfcr r9
243
244#define EXCEPTION_PROLOG_ISERIES_2 \
245 mfmsr r10; \
246 ld r11,PACALPPACA+LPPACASRR0(r13); \
247 ld r12,PACALPPACA+LPPACASRR1(r13); \
248 ori r10,r10,MSR_RI; \
249 mtmsrd r10,1
250
251/*
252 * The common exception prolog is used for all but a few exceptions,
253 * such as a segment miss on a kernel address. We have to be prepared
254 * to take another exception from the point where we first touch the
255 * kernel stack onwards.
256 *
257 * On entry r13 points to the paca, r9-r13 are saved in the paca,
258 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
259 * SRR1, and relocation is on.
260 */
261#define EXCEPTION_PROLOG_COMMON(n, area) \
262 andi. r10,r12,MSR_PR; /* See if coming from user */ \
263 mr r10,r1; /* Save r1 */ \
264 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
265 beq- 1f; \
266 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2671: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
268 bge- cr1,bad_stack; /* abort if it is */ \
269 std r9,_CCR(r1); /* save CR in stackframe */ \
270 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
271 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
272 std r10,0(r1); /* make stack chain pointer */ \
273 std r0,GPR0(r1); /* save r0 in stackframe */ \
274 std r10,GPR1(r1); /* save r1 in stackframe */ \
275 std r2,GPR2(r1); /* save r2 in stackframe */ \
276 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
277 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
278 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
279 ld r10,area+EX_R10(r13); \
280 std r9,GPR9(r1); \
281 std r10,GPR10(r1); \
282 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
283 ld r10,area+EX_R12(r13); \
284 ld r11,area+EX_R13(r13); \
285 std r9,GPR11(r1); \
286 std r10,GPR12(r1); \
287 std r11,GPR13(r1); \
288 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
289 mflr r9; /* save LR in stackframe */ \
290 std r9,_LINK(r1); \
291 mfctr r10; /* save CTR in stackframe */ \
292 std r10,_CTR(r1); \
293 mfspr r11,XER; /* save XER in stackframe */ \
294 std r11,_XER(r1); \
295 li r9,(n)+1; \
296 std r9,_TRAP(r1); /* set trap number */ \
297 li r10,0; \
298 ld r11,exception_marker@toc(r2); \
299 std r10,RESULT(r1); /* clear regs->result */ \
300 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
301
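/*
 * An illustrative sketch of the frame EXCEPTION_PROLOG_COMMON(n, area)
 * builds (pseudocode; the offsets come from asm/offsets.h):
 *
 *	sp = user_mode(srr1) ? paca->ksave : r1;	// pick kernel stack
 *	sp -= INT_FRAME_SIZE;
 *	if (sp >= 0)		// not a kernel (high-bit-set) address
 *		goto bad_stack;
 *	regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
 *	regs->gpr[0..13] = ...;		// r9-r13 come from paca->area
 *	regs->nip = srr0;  regs->msr = srr1;
 *	regs->ccr/link/ctr/xer = ...;  regs->result = 0;
 *	regs->trap = n + 1;	// low bit set: nvgprs not saved yet
 *	*(sp) = old_r1;		// stack chain back-pointer
 *	r2 = paca->kernel_toc;
 *
 * The 0x7265677368657265 TC entry spells "regshere"; it is stored just
 * above the frame so stack walkers can recognize exception frames.
 */
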
302/*
303 * Exception vectors.
304 */
305#define STD_EXCEPTION_PSERIES(n, label) \
306 . = n; \
307 .globl label##_pSeries; \
308label##_pSeries: \
309 HMT_MEDIUM; \
310 mtspr SPRG1,r13; /* save r13 */ \
311 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
312
313#define STD_EXCEPTION_ISERIES(n, label, area) \
314 .globl label##_iSeries; \
315label##_iSeries: \
316 HMT_MEDIUM; \
317 mtspr SPRG1,r13; /* save r13 */ \
318 EXCEPTION_PROLOG_ISERIES_1(area); \
319 EXCEPTION_PROLOG_ISERIES_2; \
320 b label##_common
321
322#define MASKABLE_EXCEPTION_ISERIES(n, label) \
323 .globl label##_iSeries; \
324label##_iSeries: \
325 HMT_MEDIUM; \
326 mtspr SPRG1,r13; /* save r13 */ \
327 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
328 lbz r10,PACAPROCENABLED(r13); \
329 cmpwi 0,r10,0; \
330 beq- label##_iSeries_masked; \
331 EXCEPTION_PROLOG_ISERIES_2; \
332 b label##_common; \
333
334#ifdef DO_SOFT_DISABLE
335#define DISABLE_INTS \
336 lbz r10,PACAPROCENABLED(r13); \
337 li r11,0; \
338 std r10,SOFTE(r1); \
339 mfmsr r10; \
340 stb r11,PACAPROCENABLED(r13); \
341 ori r10,r10,MSR_EE; \
342 mtmsrd r10,1
343
344#define ENABLE_INTS \
345 lbz r10,PACAPROCENABLED(r13); \
346 mfmsr r11; \
347 std r10,SOFTE(r1); \
348 ori r11,r11,MSR_EE; \
349 mtmsrd r11,1
350
351#else /* hard enable/disable interrupts */
352#define DISABLE_INTS
353
354#define ENABLE_INTS \
355 ld r12,_MSR(r1); \
356 mfmsr r11; \
357 rlwimi r11,r12,0,MSR_EE; \
358 mtmsrd r11,1
359
360#endif
361
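/*
 * A C-level sketch of the soft-disable scheme above (illustrative
 * only):
 *
 *	// DISABLE_INTS: remember the old soft state in the frame,
 *	// soft-disable, but leave external interrupts hard-ENABLED so
 *	// the masked-interrupt paths below get to run; they check
 *	// PACAPROCENABLED and just record the event when it is 0.
 *	regs->softe = paca->proc_enabled;
 *	paca->proc_enabled = 0;
 *	mtmsrd(mfmsr() | MSR_EE);
 *
 *	// ENABLE_INTS: the soft state is restored from regs->softe by
 *	// the exception-exit path; here we just hard-enable.
 *	regs->softe = paca->proc_enabled;
 *	mtmsrd(mfmsr() | MSR_EE);
 */
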
362#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
363 .align 7; \
364 .globl label##_common; \
365label##_common: \
366 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
367 DISABLE_INTS; \
368 bl .save_nvgprs; \
369 addi r3,r1,STACK_FRAME_OVERHEAD; \
370 bl hdlr; \
371 b .ret_from_except
372
373#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
374 .align 7; \
375 .globl label##_common; \
376label##_common: \
377 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
378 DISABLE_INTS; \
379 addi r3,r1,STACK_FRAME_OVERHEAD; \
380 bl hdlr; \
381 b .ret_from_except_lite
382
383/*
384 * Start of pSeries system interrupt routines
385 */
386 . = 0x100
387 .globl __start_interrupts
388__start_interrupts:
389
390 STD_EXCEPTION_PSERIES(0x100, system_reset)
391
392 . = 0x200
393_machine_check_pSeries:
394 HMT_MEDIUM
395 mtspr SPRG1,r13 /* save r13 */
396 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
397
398 . = 0x300
399 .globl data_access_pSeries
400data_access_pSeries:
401 HMT_MEDIUM
402 mtspr SPRG1,r13
403BEGIN_FTR_SECTION
404 mtspr SPRG2,r12
405 mfspr r13,DAR
406 mfspr r12,DSISR
407 srdi r13,r13,60
408 rlwimi r13,r12,16,0x20
409 mfcr r12
410 cmpwi r13,0x2c
411 beq .do_stab_bolted_pSeries
412 mtcrf 0x80,r12
413 mfspr r12,SPRG2
414END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
415 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
416
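/*
 * A note on the 0x2c test above (a sketch of the reasoning): r13 ends
 * up holding (DAR >> 60), the 4-bit region id of the faulting address,
 * and the rlwimi merges in 0x20 when DSISR has its segment-table-miss
 * bit (0x00200000) set.  So:
 *
 *	key = (dar >> 60) | (segment_miss(dsisr) ? 0x20 : 0);
 *	if (key == 0x2c)	// 0x20 (ste miss) | 0xc (kernel region)
 *		do_stab_bolted();	// bolt a kernel STE, in real mode
 *
 * Everything else falls through to the normal data access prolog.
 */
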
417 . = 0x380
418 .globl data_access_slb_pSeries
419data_access_slb_pSeries:
420 HMT_MEDIUM
421 mtspr SPRG1,r13
422 mfspr r13,SPRG3 /* get paca address into r13 */
423 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
424 std r10,PACA_EXSLB+EX_R10(r13)
425 std r11,PACA_EXSLB+EX_R11(r13)
426 std r12,PACA_EXSLB+EX_R12(r13)
427 std r3,PACA_EXSLB+EX_R3(r13)
428 mfspr r9,SPRG1
429 std r9,PACA_EXSLB+EX_R13(r13)
430 mfcr r9
431 mfspr r12,SRR1 /* and SRR1 */
432 mfspr r3,DAR
433 b .do_slb_miss /* Rel. branch works in real mode */
434
435 STD_EXCEPTION_PSERIES(0x400, instruction_access)
436
437 . = 0x480
438 .globl instruction_access_slb_pSeries
439instruction_access_slb_pSeries:
440 HMT_MEDIUM
441 mtspr SPRG1,r13
442 mfspr r13,SPRG3 /* get paca address into r13 */
443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
444 std r10,PACA_EXSLB+EX_R10(r13)
445 std r11,PACA_EXSLB+EX_R11(r13)
446 std r12,PACA_EXSLB+EX_R12(r13)
447 std r3,PACA_EXSLB+EX_R3(r13)
448 mfspr r9,SPRG1
449 std r9,PACA_EXSLB+EX_R13(r13)
450 mfcr r9
451 mfspr r12,SRR1 /* and SRR1 */
452 mfspr r3,SRR0 /* SRR0 is faulting address */
453 b .do_slb_miss /* Rel. branch works in real mode */
454
455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
456 STD_EXCEPTION_PSERIES(0x600, alignment)
457 STD_EXCEPTION_PSERIES(0x700, program_check)
458 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
459 STD_EXCEPTION_PSERIES(0x900, decrementer)
460 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
461 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
462
463 . = 0xc00
464 .globl system_call_pSeries
465system_call_pSeries:
466 HMT_MEDIUM
467 mr r9,r13
468 mfmsr r10
469 mfspr r13,SPRG3
470 mfspr r11,SRR0
471 clrrdi r12,r13,32
472 oris r12,r12,system_call_common@h
473 ori r12,r12,system_call_common@l
474 mtspr SRR0,r12
475 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
476 mfspr r12,SRR1
477 mtspr SRR1,r10
478 rfid
479 b . /* prevent speculative execution */
480
481 STD_EXCEPTION_PSERIES(0xd00, single_step)
482 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
483
484 /* We need to deal with the Altivec unavailable exception
485 * here, which is at 0xf20 and thus lands in the middle of the
486 * PerformanceMonitor prolog code. A little trickery is
487 * thus necessary.
488 */
489 . = 0xf00
490 b performance_monitor_pSeries
491
492 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
493
494 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
495 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
496
497 /* moved from 0xf00 */
498 STD_EXCEPTION_PSERIES(0x3000, performance_monitor)
499
500 . = 0x3100
501_GLOBAL(do_stab_bolted_pSeries)
502 mtcrf 0x80,r12
503 mfspr r12,SPRG2
504 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
505
506
507 /* Space for the naca. Architected to be located at real address
508 * NACA_PHYS_ADDR. Various tools rely on this location being fixed.
509 * The first dword of the naca is required by iSeries LPAR to
510 * point to itVpdAreas. On pSeries native, this value is not used.
511 */
512 . = NACA_PHYS_ADDR
513 .globl __end_interrupts
514__end_interrupts:
515#ifdef CONFIG_PPC_ISERIES
516 .globl naca
517naca:
518 .llong itVpdAreas
519
520 /*
521 * The iSeries LPAR map is at this fixed address
522 * so that the HvReleaseData structure can address
523 * it with a 32-bit offset.
524 *
525 * The VSID values below are dependent on the
526 * VSID generation algorithm. See include/asm/mmu_context.h.
527 */
528
529 . = 0x4800
530
531 .llong 2 /* # ESIDs to be mapped by hypervisor */
532 .llong 1 /* # memory ranges to be mapped by hypervisor */
533 .llong STAB0_PAGE /* Page # of segment table within load area */
534 .llong 0 /* Reserved */
535 .llong 0 /* Reserved */
536 .llong 0 /* Reserved */
537 .llong 0 /* Reserved */
538 .llong 0 /* Reserved */
539 .llong (KERNELBASE>>SID_SHIFT)
540 .llong 0x408f92c94 /* KERNELBASE VSID */
541 /* We have to list the bolted VMALLOC segment here, too, so that it
542 * will be restored on shared processor switch */
543 .llong (VMALLOCBASE>>SID_SHIFT)
544 .llong 0xf09b89af5 /* VMALLOCBASE VSID */
545 .llong 8192 /* # pages to map (32 MB) */
546 .llong 0 /* Offset from start of loadarea to start of map */
547 .llong 0x408f92c940000 /* VPN of first page to map */
548
549 . = 0x6100
550
551/*** ISeries-LPAR interrupt handlers ***/
552
553 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
554
555 .globl data_access_iSeries
556data_access_iSeries:
557 mtspr SPRG1,r13
558BEGIN_FTR_SECTION
559 mtspr SPRG2,r12
560 mfspr r13,DAR
561 mfspr r12,DSISR
562 srdi r13,r13,60
563 rlwimi r13,r12,16,0x20
564 mfcr r12
565 cmpwi r13,0x2c
566 beq .do_stab_bolted_iSeries
567 mtcrf 0x80,r12
568 mfspr r12,SPRG2
569END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
570 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
571 EXCEPTION_PROLOG_ISERIES_2
572 b data_access_common
573
574.do_stab_bolted_iSeries:
575 mtcrf 0x80,r12
576 mfspr r12,SPRG2
577 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
578 EXCEPTION_PROLOG_ISERIES_2
579 b .do_stab_bolted
580
581 .globl data_access_slb_iSeries
582data_access_slb_iSeries:
583 mtspr SPRG1,r13 /* save r13 */
584 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
585 std r3,PACA_EXSLB+EX_R3(r13)
586 ld r12,PACALPPACA+LPPACASRR1(r13)
587 mfspr r3,DAR
588 b .do_slb_miss
589
590 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
591
592 .globl instruction_access_slb_iSeries
593instruction_access_slb_iSeries:
594 mtspr SPRG1,r13 /* save r13 */
595 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
596 std r3,PACA_EXSLB+EX_R3(r13)
597 ld r12,PACALPPACA+LPPACASRR1(r13)
598 ld r3,PACALPPACA+LPPACASRR0(r13)
599 b .do_slb_miss
600
601 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
602 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
603 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
604 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
605 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
606 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
607 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
608
609 .globl system_call_iSeries
610system_call_iSeries:
611 mr r9,r13
612 mfspr r13,SPRG3
613 EXCEPTION_PROLOG_ISERIES_2
614 b system_call_common
615
616 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
617 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
618 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
619
620 .globl system_reset_iSeries
621system_reset_iSeries:
622 mfspr r13,SPRG3 /* Get paca address */
623 mfmsr r24
624 ori r24,r24,MSR_RI
625 mtmsrd r24 /* RI on */
626 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
627 cmpwi 0,r24,0 /* Are we processor 0? */
628 beq .__start_initialization_iSeries /* Start up the first processor */
629 mfspr r4,CTRLF
630 li r5,RUNLATCH /* Turn off the run light */
631 andc r4,r4,r5
632 mtspr CTRLT,r4
633
6341:
635 HMT_LOW
636#ifdef CONFIG_SMP
637 lbz r23,PACAPROCSTART(r13) /* Test if this processor
638 * should start */
639 sync
640 LOADADDR(r3,current_set)
641 sldi r28,r24,3 /* get current_set[cpu#] */
642 ldx r3,r3,r28
643 addi r1,r3,THREAD_SIZE
644 subi r1,r1,STACK_FRAME_OVERHEAD
645
646 cmpwi 0,r23,0
647 beq iSeries_secondary_smp_loop /* Loop until told to go */
648#ifdef SECONDARY_PROCESSORS
649 bne .__secondary_start /* Told to go -- start up */
650#endif
651iSeries_secondary_smp_loop:
652 /* Let the Hypervisor know we are alive */
653 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
654 lis r3,0x8002
655 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
656#else /* CONFIG_SMP */
657 /* Yield the processor. This is required for non-SMP kernels
658 which are running on multi-threaded machines. */
659 lis r3,0x8000
660 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
661 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
662 li r4,0 /* "yield timed" */
663 li r5,-1 /* "yield forever" */
664#endif /* CONFIG_SMP */
665 li r0,-1 /* r0=-1 indicates a Hypervisor call */
666 sc /* Invoke the hypervisor via a system call */
667 mfspr r13,SPRG3 /* Put r13 back ???? */
668 b 1b /* If SMP not configured, secondaries
669 * loop forever */
670
671 .globl decrementer_iSeries_masked
672decrementer_iSeries_masked:
673 li r11,1
674 stb r11,PACALPPACA+LPPACADECRINT(r13)
675 lwz r12,PACADEFAULTDECR(r13)
676 mtspr SPRN_DEC,r12
677 /* fall through */
678
679 .globl hardware_interrupt_iSeries_masked
680hardware_interrupt_iSeries_masked:
681 mtcrf 0x80,r9 /* Restore regs */
682 ld r11,PACALPPACA+LPPACASRR0(r13)
683 ld r12,PACALPPACA+LPPACASRR1(r13)
684 mtspr SRR0,r11
685 mtspr SRR1,r12
686 ld r9,PACA_EXGEN+EX_R9(r13)
687 ld r10,PACA_EXGEN+EX_R10(r13)
688 ld r11,PACA_EXGEN+EX_R11(r13)
689 ld r12,PACA_EXGEN+EX_R12(r13)
690 ld r13,PACA_EXGEN+EX_R13(r13)
691 rfid
692 b . /* prevent speculative execution */
693#endif
694
695/*
696 * Data area reserved for FWNMI option.
697 */
698 . = 0x7000
699 .globl fwnmi_data_area
700fwnmi_data_area:
701
702/*
703 * Vectors for the FWNMI option. Share common code.
704 */
705 . = 0x8000
706 .globl system_reset_fwnmi
707system_reset_fwnmi:
708 HMT_MEDIUM
709 mtspr SPRG1,r13 /* save r13 */
710 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
711 .globl machine_check_fwnmi
712machine_check_fwnmi:
713 HMT_MEDIUM
714 mtspr SPRG1,r13 /* save r13 */
715 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
716
717 /*
718 * Space for the initial segment table
719 * For LPAR, the hypervisor must fill in at least one entry
720 * before we get control (with relocation on)
721 */
722 . = STAB0_PHYS_ADDR
723 .globl __start_stab
724__start_stab:
725
726 . = (STAB0_PHYS_ADDR + PAGE_SIZE)
727 .globl __end_stab
728__end_stab:
729
730
731/*** Common interrupt handlers ***/
732
733 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
734
735 /*
736 * Machine check is different because we use a different
737 * save area: PACA_EXMC instead of PACA_EXGEN.
738 */
739 .align 7
740 .globl machine_check_common
741machine_check_common:
742 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
743 DISABLE_INTS
744 bl .save_nvgprs
745 addi r3,r1,STACK_FRAME_OVERHEAD
746 bl .machine_check_exception
747 b .ret_from_except
748
749 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
750 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
751 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
752 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
753 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
754 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
755 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
756#ifdef CONFIG_ALTIVEC
757 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
758#else
759 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
760#endif
761
762/*
763 * Here we have detected that the kernel stack pointer is bad.
764 * R9 contains the saved CR, r13 points to the paca,
765 * r10 contains the (bad) kernel stack pointer,
766 * r11 and r12 contain the saved SRR0 and SRR1.
767 * We switch to using the paca guard page as an emergency stack,
768 * save the registers there, and call kernel_bad_stack(), which panics.
769 */
770bad_stack:
771 ld r1,PACAEMERGSP(r13)
772 subi r1,r1,64+INT_FRAME_SIZE
773 std r9,_CCR(r1)
774 std r10,GPR1(r1)
775 std r11,_NIP(r1)
776 std r12,_MSR(r1)
777 mfspr r11,DAR
778 mfspr r12,DSISR
779 std r11,_DAR(r1)
780 std r12,_DSISR(r1)
781 mflr r10
782 mfctr r11
783 mfxer r12
784 std r10,_LINK(r1)
785 std r11,_CTR(r1)
786 std r12,_XER(r1)
787 SAVE_GPR(0,r1)
788 SAVE_GPR(2,r1)
789 SAVE_4GPRS(3,r1)
790 SAVE_2GPRS(7,r1)
791 SAVE_10GPRS(12,r1)
792 SAVE_10GPRS(22,r1)
793 addi r11,r1,INT_FRAME_SIZE
794 std r11,0(r1)
795 li r12,0
796 std r12,0(r11)
797 ld r2,PACATOC(r13)
7981: addi r3,r1,STACK_FRAME_OVERHEAD
799 bl .kernel_bad_stack
800 b 1b
801
802/*
803 * Return from an exception with minimal checks.
804 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
805 * If interrupts have been enabled, or anything has been
806 * done that might have changed the scheduling status of
807 * any task or sent any task a signal, you should use
808 * ret_from_except or ret_from_except_lite instead of this.
809 */
810fast_exception_return:
811 ld r12,_MSR(r1)
812 ld r11,_NIP(r1)
813 andi. r3,r12,MSR_RI /* check if RI is set */
814 beq- unrecov_fer
815 ld r3,_CCR(r1)
816 ld r4,_LINK(r1)
817 ld r5,_CTR(r1)
818 ld r6,_XER(r1)
819 mtcr r3
820 mtlr r4
821 mtctr r5
822 mtxer r6
823 REST_GPR(0, r1)
824 REST_8GPRS(2, r1)
825
826 mfmsr r10
827 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
828 mtmsrd r10,1
829
830 mtspr SRR1,r12
831 mtspr SRR0,r11
832 REST_4GPRS(10, r1)
833 ld r1,GPR1(r1)
834 rfid
835 b . /* prevent speculative execution */
836
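/*
 * Why the RI dance above: MSR_RI is cleared before SRR0/SRR1 are
 * loaded, so if a machine check arrives between the mtspr's and the
 * rfid it finds RI=0 and knows the interrupted context cannot be
 * resumed, instead of silently returning through clobbered SRRs.
 */
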
837unrecov_fer:
838 bl .save_nvgprs
8391: addi r3,r1,STACK_FRAME_OVERHEAD
840 bl .unrecoverable_exception
841 b 1b
842
843/*
844 * Here r13 points to the paca, r9 contains the saved CR,
845 * SRR0 and SRR1 are saved in r11 and r12,
846 * r9 - r13 are saved in paca->exgen.
847 */
848 .align 7
849 .globl data_access_common
850data_access_common:
851 mfspr r10,DAR
852 std r10,PACA_EXGEN+EX_DAR(r13)
853 mfspr r10,DSISR
854 stw r10,PACA_EXGEN+EX_DSISR(r13)
855 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
856 ld r3,PACA_EXGEN+EX_DAR(r13)
857 lwz r4,PACA_EXGEN+EX_DSISR(r13)
858 li r5,0x300
859 b .do_hash_page /* Try to handle as hpte fault */
860
861 .align 7
862 .globl instruction_access_common
863instruction_access_common:
864 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
865 ld r3,_NIP(r1)
866 andis. r4,r12,0x5820 /* extract ISI fault bits from SRR1 */
867 li r5,0x400
868 b .do_hash_page /* Try to handle as hpte fault */
869
870 .align 7
871 .globl hardware_interrupt_common
872 .globl hardware_interrupt_entry
873hardware_interrupt_common:
874 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
875hardware_interrupt_entry:
876 DISABLE_INTS
877 addi r3,r1,STACK_FRAME_OVERHEAD
878 bl .do_IRQ
879 b .ret_from_except_lite
880
881 .align 7
882 .globl alignment_common
883alignment_common:
884 mfspr r10,DAR
885 std r10,PACA_EXGEN+EX_DAR(r13)
886 mfspr r10,DSISR
887 stw r10,PACA_EXGEN+EX_DSISR(r13)
888 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
889 ld r3,PACA_EXGEN+EX_DAR(r13)
890 lwz r4,PACA_EXGEN+EX_DSISR(r13)
891 std r3,_DAR(r1)
892 std r4,_DSISR(r1)
893 bl .save_nvgprs
894 addi r3,r1,STACK_FRAME_OVERHEAD
895 ENABLE_INTS
896 bl .alignment_exception
897 b .ret_from_except
898
899 .align 7
900 .globl program_check_common
901program_check_common:
902 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
903 bl .save_nvgprs
904 addi r3,r1,STACK_FRAME_OVERHEAD
905 ENABLE_INTS
906 bl .program_check_exception
907 b .ret_from_except
908
909 .align 7
910 .globl fp_unavailable_common
911fp_unavailable_common:
912 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
913 bne .load_up_fpu /* if from user, just load it up */
914 bl .save_nvgprs
915 addi r3,r1,STACK_FRAME_OVERHEAD
916 ENABLE_INTS
917 bl .kernel_fp_unavailable_exception
918 BUG_OPCODE
919
920 .align 7
921 .globl altivec_unavailable_common
922altivec_unavailable_common:
923 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
924#ifdef CONFIG_ALTIVEC
925BEGIN_FTR_SECTION
926 bne .load_up_altivec /* if from user, just load it up */
927END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
928#endif
929 bl .save_nvgprs
930 addi r3,r1,STACK_FRAME_OVERHEAD
931 ENABLE_INTS
932 bl .altivec_unavailable_exception
933 b .ret_from_except
934
935/*
936 * Hash table stuff
937 */
938 .align 7
939_GLOBAL(do_hash_page)
940 std r3,_DAR(r1)
941 std r4,_DSISR(r1)
942
943 andis. r0,r4,0xa450 /* weird error? */
944 bne- .handle_page_fault /* if so, skip the HPTE insertion */
945BEGIN_FTR_SECTION
946 andis. r0,r4,0x0020 /* Is it a segment table fault? */
947 bne- .do_ste_alloc /* If so handle it */
948END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
949
950 /*
951 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
952 * accessing a userspace segment (even from the kernel). We assume
953 * kernel addresses always have the high bit set.
954 */
955 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
956 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
957 orc r0,r12,r0 /* MSR_PR | ~high_bit */
958 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
959 ori r4,r4,1 /* add _PAGE_PRESENT */
960 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
961
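/*
 * Net effect of the bit gymnastics above, as C-style pseudocode
 * (illustrative; the real bit positions come from pgtable.h):
 *
 *	access = (store_fault(dsisr) ? _PAGE_RW : 0)
 *	       | ((user_mode(srr1) || !is_kernel_addr(dar))
 *			? _PAGE_USER : 0)
 *	       | _PAGE_PRESENT
 *	       | (trap == 0x400 ? _PAGE_EXEC : 0);
 *
 * which is then handed to hash_page() as the required permissions.
 */
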
962 /*
963 * On iSeries, we soft-disable interrupts here, then
964 * hard-enable interrupts so that the hash_page code can spin on
965 * the hash_table_lock without problems on a shared processor.
966 */
967 DISABLE_INTS
968
969 /*
970 * r3 contains the faulting address
971 * r4 contains the required access permissions
972 * r5 contains the trap number
973 *
974 * at return r3 = 0 for success
975 */
976 bl .hash_page /* build HPTE if possible */
977 cmpdi r3,0 /* see if hash_page succeeded */
978
979#ifdef DO_SOFT_DISABLE
980 /*
981 * If we had interrupts soft-enabled at the point where the
982 * DSI/ISI occurred, and an interrupt came in during hash_page,
983 * handle it now.
984 * We jump to ret_from_except_lite rather than fast_exception_return
985 * because ret_from_except_lite will check for and handle pending
986 * interrupts if necessary.
987 */
988 beq .ret_from_except_lite
989 /* For a hash failure, we don't bother re-enabling interrupts */
990 ble- 12f
991
992 /*
993 * hash_page couldn't handle it, set soft interrupt enable back
994 * to what it was before the trap. Note that .local_irq_restore
995 * handles any interrupts pending at this point.
996 */
997 ld r3,SOFTE(r1)
998 bl .local_irq_restore
999 b 11f
1000#else
1001 beq fast_exception_return /* Return from exception on success */
1002 ble- 12f /* Failure return from hash_page */
1003
1004 /* fall through */
1005#endif
1006
1007/* Here we have a page fault that hash_page can't handle. */
1008_GLOBAL(handle_page_fault)
1009 ENABLE_INTS
101011: ld r4,_DAR(r1)
1011 ld r5,_DSISR(r1)
1012 addi r3,r1,STACK_FRAME_OVERHEAD
1013 bl .do_page_fault
1014 cmpdi r3,0
1015 beq+ .ret_from_except_lite
1016 bl .save_nvgprs
1017 mr r5,r3
1018 addi r3,r1,STACK_FRAME_OVERHEAD
1019 lwz r4,_DAR(r1)
1020 bl .bad_page_fault
1021 b .ret_from_except
1022
1023/* We have a page fault that hash_page could handle but HV refused
1024 * the PTE insertion
1025 */
102612: bl .save_nvgprs
1027 addi r3,r1,STACK_FRAME_OVERHEAD
1028 lwz r4,_DAR(r1)
1029 bl .low_hash_fault
1030 b .ret_from_except
1031
1032 /* here we have a segment miss */
1033_GLOBAL(do_ste_alloc)
1034 bl .ste_allocate /* try to insert stab entry */
1035 cmpdi r3,0
1036 beq+ fast_exception_return
1037 b .handle_page_fault
1038
1039/*
1040 * r13 points to the PACA, r9 contains the saved CR,
1041 * r11 and r12 contain the saved SRR0 and SRR1.
1042 * r9 - r13 are saved in paca->exslb.
1043 * We assume we aren't going to take any exceptions during this procedure.
1044 * We assume (DAR >> 60) == 0xc.
1045 */
1046 .align 7
1047_GLOBAL(do_stab_bolted)
1048 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1049 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1050
1051 /* Hash to the primary group */
1052 ld r10,PACASTABVIRT(r13)
1053 mfspr r11,DAR
1054 srdi r11,r11,28
1055 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1056
1057 /* Calculate VSID */
1058 /* This is a kernel address, so protovsid = ESID */
1059 ASM_VSID_SCRAMBLE(r11, r9)
1060 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1061
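/*
 * Sketch of the addressing math (illustrative): the 4K segment table
 * holds 32 groups of 8 sixteen-byte STEs, and an ESID hashes to a
 * group by its low 5 bits, so the rldimi above computes
 *
 *	group = stab_base + (esid & 0x1f) * 128;
 *
 * ASM_VSID_SCRAMBLE turns the protovsid (== ESID for kernel
 * addresses) into the VSID; the multiplier/modulus live in asm/mmu.h.
 */
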
1062 /* Search the primary group for a free entry */
10631: ld r11,0(r10) /* Test valid bit of the current ste */
1064 andi. r11,r11,0x80
1065 beq 2f
1066 addi r10,r10,16
1067 andi. r11,r10,0x70
1068 bne 1b
1069
1070 /* Stick to searching only the primary group for now. */
1071 /* At least for now, we use a very simple random castout scheme */
1072 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1073 mftb r11
1074 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1075 ori r11,r11,0x10
1076
1077 /* r10 currently points to an ste one past the group of interest */
1078 /* make it point to the randomly selected entry */
1079 subi r10,r10,128
1080 or r10,r10,r11 /* r10 is the entry to invalidate */
1081
1082 isync /* mark the entry invalid */
1083 ld r11,0(r10)
1084 rldicl r11,r11,56,1 /* clear the valid bit */
1085 rotldi r11,r11,8
1086 std r11,0(r10)
1087 sync
1088
1089 clrrdi r11,r11,28 /* Get the esid part of the ste */
1090 slbie r11
1091
10922: std r9,8(r10) /* Store the vsid part of the ste */
1093 eieio
1094
1095 mfspr r11,DAR /* Get the new esid */
1096 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1097 ori r11,r11,0x90 /* Turn on valid and kp */
1098 std r11,0(r10) /* Put new entry back into the stab */
1099
1100 sync
1101
1102 /* All done -- return from exception. */
1103 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1104 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1105
1106 andi. r10,r12,MSR_RI
1107 beq- unrecov_slb
1108
1109 mtcrf 0x80,r9 /* restore CR */
1110
1111 mfmsr r10
1112 clrrdi r10,r10,2
1113 mtmsrd r10,1
1114
1115 mtspr SRR0,r11
1116 mtspr SRR1,r12
1117 ld r9,PACA_EXSLB+EX_R9(r13)
1118 ld r10,PACA_EXSLB+EX_R10(r13)
1119 ld r11,PACA_EXSLB+EX_R11(r13)
1120 ld r12,PACA_EXSLB+EX_R12(r13)
1121 ld r13,PACA_EXSLB+EX_R13(r13)
1122 rfid
1123 b . /* prevent speculative execution */
1124
1125/*
1126 * r13 points to the PACA, r9 contains the saved CR,
1127 * r11 and r12 contain the saved SRR0 and SRR1.
1128 * r3 has the faulting address
1129 * r9 - r13 are saved in paca->exslb.
1130 * r3 is saved in paca->exslb (EX_R3)
1131 * We assume we aren't going to take any exceptions during this procedure.
1132 */
1133_GLOBAL(do_slb_miss)
1134 mflr r10
1135
1136 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1137 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1138
1139 bl .slb_allocate /* handle it */
1140
1141 /* All done -- return from exception. */
1142
1143 ld r10,PACA_EXSLB+EX_LR(r13)
1144 ld r3,PACA_EXSLB+EX_R3(r13)
1145 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1146#ifdef CONFIG_PPC_ISERIES
1147 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1148#endif /* CONFIG_PPC_ISERIES */
1149
1150 mtlr r10
1151
1152 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1153 beq- unrecov_slb
1154
1155.machine push
1156.machine "power4"
1157 mtcrf 0x80,r9
1158 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1159.machine pop
1160
1161#ifdef CONFIG_PPC_ISERIES
1162 mtspr SRR0,r11
1163 mtspr SRR1,r12
1164#endif /* CONFIG_PPC_ISERIES */
1165 ld r9,PACA_EXSLB+EX_R9(r13)
1166 ld r10,PACA_EXSLB+EX_R10(r13)
1167 ld r11,PACA_EXSLB+EX_R11(r13)
1168 ld r12,PACA_EXSLB+EX_R12(r13)
1169 ld r13,PACA_EXSLB+EX_R13(r13)
1170 rfid
1171 b . /* prevent speculative execution */
1172
1173unrecov_slb:
1174 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1175 DISABLE_INTS
1176 bl .save_nvgprs
11771: addi r3,r1,STACK_FRAME_OVERHEAD
1178 bl .unrecoverable_exception
1179 b 1b
1180
1181
1182/*
1183 * On pSeries, secondary processors spin in the following code.
1184 * At entry, r3 = this processor's number (physical cpu id)
1185 */
1186_GLOBAL(pSeries_secondary_smp_init)
1187 mr r24,r3
1188
1189 /* turn on 64-bit mode */
1190 bl .enable_64b_mode
1191 isync
1192
1193 /* Copy some CPU settings from CPU 0 */
1194 bl .__restore_cpu_setup
1195
1196 /* Set up a paca value for this processor. Since we have the
1197 * physical cpu id in r3, we need to search the pacas to find
1198 * which logical id maps to our physical one.
1199 */
1200 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1201 li r5,0 /* logical cpu id */
12021: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1203 cmpw r6,r24 /* Compare to our id */
1204 beq 2f
1205 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1206 addi r5,r5,1
1207 cmpwi r5,NR_CPUS
1208 blt 1b
1209
121099: HMT_LOW /* Couldn't find our CPU id */
1211 b 99b
1212
12132: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1214 /* From now on, r24 is expected to be logical cpuid */
1215 mr r24,r5
12163: HMT_LOW
1217 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1218 /* start. */
1219 sync
1220
1221 /* Create a temp kernel stack for use before relocation is on. */
1222 ld r1,PACAEMERGSP(r13)
1223 subi r1,r1,STACK_FRAME_OVERHEAD
1224
1225 cmpwi 0,r23,0
1226#ifdef CONFIG_SMP
1227#ifdef SECONDARY_PROCESSORS
1228 bne .__secondary_start
1229#endif
1230#endif
1231 b 3b /* Loop until told to go */
1232
1233#ifdef CONFIG_PPC_ISERIES
1234_STATIC(__start_initialization_iSeries)
1235 /* Clear out the BSS */
1236 LOADADDR(r11,__bss_stop)
1237 LOADADDR(r8,__bss_start)
1238 sub r11,r11,r8 /* bss size */
1239 addi r11,r11,7 /* round up to an even double word */
1240 rldicl. r11,r11,61,3 /* shift right by 3 */
1241 beq 4f
1242 addi r8,r8,-8
1243 li r0,0
1244 mtctr r11 /* zero this many doublewords */
12453: stdu r0,8(r8)
1246 bdnz 3b
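/* The loop above is a doubleword memset(__bss_start, 0, size):
 * (size + 7) / 8 iterations of stdu, with r8 pre-biased by -8
 * because stdu stores to r8 + 8 and then updates r8.
 */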
12474:
1248 LOADADDR(r1,init_thread_union)
1249 addi r1,r1,THREAD_SIZE
1250 li r0,0
1251 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1252
1253 LOADADDR(r3,cpu_specs)
1254 LOADADDR(r4,cur_cpu_spec)
1255 li r5,0
1256 bl .identify_cpu
1257
1258 LOADADDR(r2,__toc_start)
1259 addi r2,r2,0x4000
1260 addi r2,r2,0x4000
1261
1262 bl .iSeries_early_setup
1263
1264 /* relocation is on at this point */
1265
1266 b .start_here_common
1267#endif /* CONFIG_PPC_ISERIES */
1268
1269#ifdef CONFIG_PPC_MULTIPLATFORM
1270
1271_STATIC(__mmu_off)
1272 mfmsr r3
1273 andi. r0,r3,MSR_IR|MSR_DR
1274 beqlr
1275 andc r3,r3,r0
1276 mtspr SPRN_SRR0,r4
1277 mtspr SPRN_SRR1,r3
1278 sync
1279 rfid
1280 b . /* prevent speculative execution */
1281
1282
1283/*
1284 * Here is our main kernel entry point. We currently support 2 kinds of entries,
1285 * depending on the value of r5.
1286 *
1287 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1288 * in r3...r7
1289 *
1290 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1291 * DT block, r4 is a physical pointer to the kernel itself
1292 *
1293 */
1294_GLOBAL(__start_initialization_multiplatform)
1295 /*
1296 * Are we booted from a PROM OF-type client interface?
1297 */
1298 cmpldi cr0,r5,0
1299 bne .__boot_from_prom /* yes -> prom */
1300
1301 /* Save parameters */
1302 mr r31,r3
1303 mr r30,r4
1304
1305 /* Make sure we are running in 64-bit mode */
1306 bl .enable_64b_mode
1307
1308 /* Setup some critical 970 SPRs before switching MMU off */
1309 bl .__970_cpu_preinit
1310
1311 /* cpu # */
1312 li r24,0
1313
1314 /* Switch off MMU if not already */
1315 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1316 add r4,r4,r30
1317 bl .__mmu_off
1318 b .__after_prom_start
1319
1320_STATIC(__boot_from_prom)
1321 /* Save parameters */
1322 mr r31,r3
1323 mr r30,r4
1324 mr r29,r5
1325 mr r28,r6
1326 mr r27,r7
1327
1328 /* Make sure we are running in 64-bit mode */
1329 bl .enable_64b_mode
1330
1331 /* put a relocation offset into r3 */
1332 bl .reloc_offset
1333
1334 LOADADDR(r2,__toc_start)
1335 addi r2,r2,0x4000
1336 addi r2,r2,0x4000
1337
1338 /* Relocate the TOC from a virt addr to a real addr */
1339 sub r2,r2,r3
1340
1341 /* Restore parameters */
1342 mr r3,r31
1343 mr r4,r30
1344 mr r5,r29
1345 mr r6,r28
1346 mr r7,r27
1347
1348 /* Do all of the interaction with OF client interface */
1349 bl .prom_init
1350 /* We never return */
1351 trap
1352
1353/*
1354 * At this point, r3 contains the physical address we are running at,
1355 * returned by prom_init()
1356 */
1357_STATIC(__after_prom_start)
1358
1359/*
1360 * We need to run with __start at physical address 0.
1361 * This will leave some code in the first 256B of
1362 * real memory, which are reserved for software use.
1363 * The remainder of the first page is loaded with the fixed
1364 * interrupt vectors. The next two pages are filled with
1365 * unknown exception placeholders.
1366 *
1367 * Note: This process overwrites the OF exception vectors.
1368 * r26 == relocation offset
1369 * r27 == KERNELBASE
1370 */
1371 bl .reloc_offset
1372 mr r26,r3
1373 SET_REG_TO_CONST(r27,KERNELBASE)
1374
1375 li r3,0 /* target addr */
1376
1377 // XXX FIXME: Use phys returned by OF (r30)
1378 sub r4,r27,r26 /* source addr */
1379 /* current address of _start */
1380 /* i.e. where we are running */
1381 /* the source addr */
1382
1383 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1384 sub r5,r5,r27
1385
1386 li r6,0x100 /* Start offset, the first 0x100 */
1387 /* bytes were copied earlier. */
1388
1389 bl .copy_and_flush /* copy the first n bytes */
1390 /* this includes the code being */
1391 /* executed here. */
1392
1393 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1394 mtctr r0 /* that we just made/relocated */
1395 bctr
1396
13974: LOADADDR(r5,klimit)
1398 sub r5,r5,r26
1399 ld r5,0(r5) /* get the value of klimit */
1400 sub r5,r5,r27
1401 bl .copy_and_flush /* copy the rest */
1402 b .start_here_multiplatform
1403
1404#endif /* CONFIG_PPC_MULTIPLATFORM */
1405
1406/*
1407 * Copy routine used to copy the kernel to start at physical address 0
1408 * and flush and invalidate the caches as needed.
1409 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1410 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1411 *
1412 * Note: this routine *only* clobbers r0, r6 and lr
1413 */
1414_GLOBAL(copy_and_flush)
1415 addi r5,r5,-8
1416 addi r6,r6,-8
14174: li r0,16 /* Use the least common */
1418 /* denominator cache line */
1419 /* size. This results in */
1420 /* extra cache line flushes */
1421 /* but operation is correct. */
1422 /* Can't get cache line size */
1423 /* from NACA as it is being */
1424 /* moved too. */
1425
1426 mtctr r0 /* put # words/line in ctr */
14273: addi r6,r6,8 /* copy a cache line */
1428 ldx r0,r6,r4
1429 stdx r0,r6,r3
1430 bdnz 3b
1431 dcbst r6,r3 /* write it to memory */
1432 sync
1433 icbi r6,r3 /* flush the icache line */
1434 cmpld 0,r6,r5
1435 blt 4b
1436 sync
1437 addi r5,r5,8
1438 addi r6,r6,8
1439 blr
1440
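/*
 * Rough C equivalent of copy_and_flush (a sketch; the asm copies
 * 16 doublewords -- 128 bytes -- between cache maintenance ops):
 *
 *	void copy_and_flush(char *dst, char *src, long limit, long off)
 *	{
 *		while (off < limit) {
 *			for (int i = 0; i < 16; i++, off += 8)
 *				*(u64 *)(dst + off) = *(u64 *)(src + off);
 *			dcbst(dst + off - 8);	// push the copied line out
 *			sync();			// ...and wait for it
 *			icbi(dst + off - 8);	// toss stale icache copies
 *		}
 *		sync();		// ensure the icbi's have taken effect
 *	}
 *
 * (The asm biases r5/r6 by -8 because its copy loop pre-increments.)
 */
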
1441.align 8
1442copy_to_here:
1443
1444/*
1445 * load_up_fpu(unused, unused, tsk)
1446 * Disable FP for the task which had the FPU previously,
1447 * and save its floating-point registers in its thread_struct.
1448 * Enables the FPU for use in the kernel on return.
1449 * On SMP we know the fpu is free, since we give it up every
1450 * switch (ie, no lazy save of the FP registers).
1451 * On entry: r13 == 'current' && last_task_used_math != 'current'
1452 */
1453_STATIC(load_up_fpu)
1454 mfmsr r5 /* grab the current MSR */
1455 ori r5,r5,MSR_FP
1456 mtmsrd r5 /* enable use of fpu now */
1457 isync
1458/*
1459 * For SMP, we don't do lazy FPU switching because it just gets too
1460 * horrendously complex, especially when a task switches from one CPU
1461 * to another. Instead we call giveup_fpu in switch_to.
1462 *
1463 */
1464#ifndef CONFIG_SMP
1465 ld r3,last_task_used_math@got(r2)
1466 ld r4,0(r3)
1467 cmpdi 0,r4,0
1468 beq 1f
1469 /* Save FP state to last_task_used_math's THREAD struct */
1470 addi r4,r4,THREAD
1471 SAVE_32FPRS(0, r4)
1472 mffs fr0
1473 stfd fr0,THREAD_FPSCR(r4)
1474 /* Disable FP for last_task_used_math */
1475 ld r5,PT_REGS(r4)
1476 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1477 li r6,MSR_FP|MSR_FE0|MSR_FE1
1478 andc r4,r4,r6
1479 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
14801:
1481#endif /* CONFIG_SMP */
1482 /* enable use of FP after return */
1483 ld r4,PACACURRENT(r13)
1484 addi r5,r4,THREAD /* Get THREAD */
1485 ld r4,THREAD_FPEXC_MODE(r5)
1486 ori r12,r12,MSR_FP
1487 or r12,r12,r4
1488 std r12,_MSR(r1)
1489 lfd fr0,THREAD_FPSCR(r5)
1490 mtfsf 0xff,fr0
1491 REST_32FPRS(0, r5)
1492#ifndef CONFIG_SMP
1493 /* Update last_task_used_math to 'current' */
1494 subi r4,r5,THREAD /* Back to 'current' */
1495 std r4,0(r3)
1496#endif /* CONFIG_SMP */
1497 /* restore registers and return */
1498 b fast_exception_return
1499
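/*
 * The !CONFIG_SMP path above is classic lazy FPU switching; roughly,
 * as a sketch (giveup_fpu below is the same save half):
 *
 *	if (last_task_used_math) {
 *		save FPRs + FPSCR into last_task_used_math->thread;
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);
 *	}
 *	load FPSCR + FPRs from current->thread;
 *	regs->msr |= MSR_FP | current->thread.fpexc_mode;
 *	last_task_used_math = current;
 */
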
1500/*
1501 * disable_kernel_fp()
1502 * Disable the FPU.
1503 */
1504_GLOBAL(disable_kernel_fp)
1505 mfmsr r3
1506 rldicl r0,r3,(63-MSR_FP_LG),1
1507 rldicl r3,r0,(MSR_FP_LG+1),0
1508 mtmsrd r3 /* disable use of fpu now */
1509 isync
1510 blr
1511
1512/*
1513 * giveup_fpu(tsk)
1514 * Disable FP for the task given as the argument,
1515 * and save the floating-point registers in its thread_struct.
1516 * Enables the FPU for use in the kernel on return.
1517 */
1518_GLOBAL(giveup_fpu)
1519 mfmsr r5
1520 ori r5,r5,MSR_FP
1521 mtmsrd r5 /* enable use of fpu now */
1522 isync
1523 cmpdi 0,r3,0
1524 beqlr- /* if no previous owner, done */
1525 addi r3,r3,THREAD /* want THREAD of task */
1526 ld r5,PT_REGS(r3)
1527 cmpdi 0,r5,0
1528 SAVE_32FPRS(0, r3)
1529 mffs fr0
1530 stfd fr0,THREAD_FPSCR(r3)
1531 beq 1f
1532 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1533 li r3,MSR_FP|MSR_FE0|MSR_FE1
1534 andc r4,r4,r3 /* disable FP for previous task */
1535 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15361:
1537#ifndef CONFIG_SMP
1538 li r5,0
1539 ld r4,last_task_used_math@got(r2)
1540 std r5,0(r4)
1541#endif /* CONFIG_SMP */
1542 blr
1543
1544
1545#ifdef CONFIG_ALTIVEC
1546
1547/*
1548 * load_up_altivec(unused, unused, tsk)
1549 * Disable VMX for the task which had it previously,
1550 * and save its vector registers in its thread_struct.
1551 * Enables the VMX for use in the kernel on return.
1552 * On SMP we know the VMX is free, since we give it up every
1553 * switch (ie, no lazy save of the vector registers).
1554 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1555 */
1556_STATIC(load_up_altivec)
1557 mfmsr r5 /* grab the current MSR */
1558 oris r5,r5,MSR_VEC@h
1559 mtmsrd r5 /* enable use of VMX now */
1560 isync
1561
1562/*
1563 * For SMP, we don't do lazy VMX switching because it just gets too
1564 * horrendously complex, especially when a task switches from one CPU
1565 * to another. Instead we call giveup_altivec in switch_to.
1566 * VRSAVE isn't dealt with here; that is done in the normal context
1567 * switch code. Note that we could rely on vrsave value to eventually
1568 * avoid saving all of the VREGs here...
1569 */
1570#ifndef CONFIG_SMP
1571 ld r3,last_task_used_altivec@got(r2)
1572 ld r4,0(r3)
1573 cmpdi 0,r4,0
1574 beq 1f
1575 /* Save VMX state to last_task_used_altivec's THREAD struct */
1576 addi r4,r4,THREAD
1577 SAVE_32VRS(0,r5,r4)
1578 mfvscr vr0
1579 li r10,THREAD_VSCR
1580 stvx vr0,r10,r4
1581 /* Disable VMX for last_task_used_altivec */
1582 ld r5,PT_REGS(r4)
1583 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1584 lis r6,MSR_VEC@h
1585 andc r4,r4,r6
1586 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15871:
1588#endif /* CONFIG_SMP */
1589 /* Hack: if we get an altivec unavailable trap with VRSAVE
1590 * set to all zeros, we assume this is a broken application
1591 * that fails to set it properly, and thus we switch it to
1592 * all 1's
1593 */
1594 mfspr r4,SPRN_VRSAVE
1595 cmpdi 0,r4,0
1596 bne+ 1f
1597 li r4,-1
1598 mtspr SPRN_VRSAVE,r4
15991:
1600 /* enable use of VMX after return */
1601 ld r4,PACACURRENT(r13)
1602 addi r5,r4,THREAD /* Get THREAD */
1603 oris r12,r12,MSR_VEC@h
1604 std r12,_MSR(r1)
1605 li r4,1
1606 li r10,THREAD_VSCR
1607 stw r4,THREAD_USED_VR(r5)
1608 lvx vr0,r10,r5
1609 mtvscr vr0
1610 REST_32VRS(0,r4,r5)
1611#ifndef CONFIG_SMP
1612 /* Update last_task_used_altivec to 'current' */
1613 subi r4,r5,THREAD /* Back to 'current' */
1614 std r4,0(r3)
1615#endif /* CONFIG_SMP */
1616 /* restore registers and return */
1617 b fast_exception_return
1618
1619/*
1620 * disable_kernel_altivec()
1621 * Disable the VMX.
1622 */
1623_GLOBAL(disable_kernel_altivec)
1624 mfmsr r3
1625 rldicl r0,r3,(63-MSR_VEC_LG),1
1626 rldicl r3,r0,(MSR_VEC_LG+1),0
1627 mtmsrd r3 /* disable use of VMX now */
1628 isync
1629 blr
1630
1631/*
1632 * giveup_altivec(tsk)
1633 * Disable VMX for the task given as the argument,
1634 * and save the vector registers in its thread_struct.
1635 * Enables the VMX for use in the kernel on return.
1636 */
1637_GLOBAL(giveup_altivec)
1638 mfmsr r5
1639 oris r5,r5,MSR_VEC@h
1640 mtmsrd r5 /* enable use of VMX now */
1641 isync
1642 cmpdi 0,r3,0
1643 beqlr- /* if no previous owner, done */
1644 addi r3,r3,THREAD /* want THREAD of task */
1645 ld r5,PT_REGS(r3)
1646 cmpdi 0,r5,0
1647 SAVE_32VRS(0,r4,r3)
1648 mfvscr vr0
1649 li r4,THREAD_VSCR
1650 stvx vr0,r4,r3
1651 beq 1f
1652 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1653 lis r3,MSR_VEC@h
1654 andc r4,r4,r3 /* disable VMX for previous task */
1655 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
16561:
1657#ifndef CONFIG_SMP
1658 li r5,0
1659 ld r4,last_task_used_altivec@got(r2)
1660 std r5,0(r4)
1661#endif /* CONFIG_SMP */
1662 blr
1663
1664#endif /* CONFIG_ALTIVEC */
1665
1666#ifdef CONFIG_SMP
1667#ifdef CONFIG_PPC_PMAC
1668/*
1669 * On PowerMac, secondary processors start from the reset vector, which
1670 * is temporarily turned into a call to one of the functions below.
1671 */
1672 .section ".text";
1673 .align 2 ;
1674
1675 .globl pmac_secondary_start_1
1676pmac_secondary_start_1:
1677 li r24, 1
1678 b .pmac_secondary_start
1679
1680 .globl pmac_secondary_start_2
1681pmac_secondary_start_2:
1682 li r24, 2
1683 b .pmac_secondary_start
1684
1685 .globl pmac_secondary_start_3
1686pmac_secondary_start_3:
1687 li r24, 3
1688 b .pmac_secondary_start
1689
1690_GLOBAL(pmac_secondary_start)
1691 /* turn on 64-bit mode */
1692 bl .enable_64b_mode
1693 isync
1694
1695 /* Copy some CPU settings from CPU 0 */
1696 bl .__restore_cpu_setup
1697
1698 /* pSeries does this early, though I don't think we really need it */
1699 mfmsr r3
1700 ori r3,r3,MSR_RI
1701 mtmsrd r3 /* RI on */
1702
1703 /* Set up a paca value for this processor. */
1704 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1705 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1706 add r13,r13,r4 /* for this processor. */
1707 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1708
1709 /* Create a temp kernel stack for use before relocation is on. */
1710 ld r1,PACAEMERGSP(r13)
1711 subi r1,r1,STACK_FRAME_OVERHEAD
1712
1713 b .__secondary_start
1714
1715#endif /* CONFIG_PPC_PMAC */
1716
1717/*
1718 * This function is called after the master CPU has released the
1719 * secondary processors. The execution environment is relocation off.
1720 * The paca for this processor has the following fields initialized at
1721 * this point:
1722 * 1. Processor number
1723 * 2. Segment table pointer (virtual address)
1724 * On entry the following are set:
1725 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1726 * r24 = cpu# (in Linux terms)
1727 * r13 = paca virtual address
1728 * SPRG3 = paca virtual address
1729 */
1730_GLOBAL(__secondary_start)
1731
1732 HMT_MEDIUM /* Set thread priority to MEDIUM */
1733
1734 ld r2,PACATOC(r13)
1735 li r6,0
1736 stb r6,PACAPROCENABLED(r13)
1737
1738#ifndef CONFIG_PPC_ISERIES
1739 /* Initialize the page table pointer register. */
1740 LOADADDR(r6,_SDR1)
1741 ld r6,0(r6) /* get the value of _SDR1 */
1742 mtspr SDR1,r6 /* set the htab location */
1743#endif
1744 /* Initialize the first segment table (or SLB) entry */
1745 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1746 bl .stab_initialize
1747
1748 /* Initialize the kernel stack. Just a repeat for iSeries. */
1749 LOADADDR(r3,current_set)
1750 sldi r28,r24,3 /* get current_set[cpu#] */
1751 ldx r1,r3,r28
1752 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1753 std r1,PACAKSAVE(r13)
1754
1755 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1756 ori r4,r3,1 /* turn on valid bit */
1757
1758#ifdef CONFIG_PPC_ISERIES
1759 li r0,-1 /* hypervisor call */
1760 li r3,1
1761 sldi r3,r3,63 /* 0x8000000000000000 */
1762 ori r3,r3,4 /* 0x8000000000000004 */
1763 sc /* HvCall_setASR */
1764#else
1765 /* set the ASR */
1766 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1767 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1768 cmpldi r3,PLATFORM_PSERIES_LPAR
1769 bne 98f
1770 mfspr r3,PVR
1771 srwi r3,r3,16
1772 cmpwi r3,0x37 /* SStar */
1773 beq 97f
1774 cmpwi r3,0x36 /* IStar */
1775 beq 97f
1776 cmpwi r3,0x34 /* Pulsar */
1777 bne 98f
177897: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1779 HVSC /* Invoking hcall */
1780 b 99f
178198: /* !(rpa hypervisor) || !(star) */
1782 mtasr r4 /* set the stab location */
178399:
1784#endif
1785 li r7,0
1786 mtlr r7
1787
1788 /* enable MMU and jump to start_secondary */
1789 LOADADDR(r3,.start_secondary_prolog)
1790 SET_REG_TO_CONST(r4, MSR_KERNEL)
1791#ifdef DO_SOFT_DISABLE
1792 ori r4,r4,MSR_EE
1793#endif
1794 mtspr SRR0,r3
1795 mtspr SRR1,r4
1796 rfid
1797 b . /* prevent speculative execution */
1798
1799/*
1800 * Running with relocation on at this point. All we want to do is
1801 * zero the stack back-chain pointer before going into C code.
1802 */
1803_GLOBAL(start_secondary_prolog)
1804 li r3,0
1805 std r3,0(r1) /* Zero the stack frame pointer */
1806 bl .start_secondary
1807#endif
1808
1809/*
1810 * This subroutine clobbers r11 and r12
1811 */
1812_GLOBAL(enable_64b_mode)
1813 mfmsr r11 /* grab the current MSR */
1814 li r12,1
1815 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1816 or r11,r11,r12
1817 li r12,1
1818 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1819 or r11,r11,r12
1820 mtmsrd r11
1821 isync
1822 blr
1823
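/*
 * Equivalent to mtmsrd(mfmsr() | MSR_SF | MSR_ISF) -- the rldicr
 * pairs just build the 1 << MSR_SF_LG and 1 << MSR_ISF_LG constants,
 * since li cannot materialize bits in the upper word.
 */
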
1824#ifdef CONFIG_PPC_MULTIPLATFORM
1825/*
1826 * This is where the main kernel code starts.
1827 */
1828_STATIC(start_here_multiplatform)
1829 /* get a new offset, now that the kernel has moved. */
1830 bl .reloc_offset
1831 mr r26,r3
1832
1833 /* Clear out the BSS. It may have been done in prom_init
1834 * already, but that's irrelevant since prom_init will soon
1835 * be detached from the kernel completely. Besides, we need
1836 * to clear it now for kexec-style entry.
1837 */
1838 LOADADDR(r11,__bss_stop)
1839 LOADADDR(r8,__bss_start)
1840 sub r11,r11,r8 /* bss size */
1841 addi r11,r11,7 /* round up to an even double word */
1842 rldicl. r11,r11,61,3 /* shift right by 3 */
1843 beq 4f
1844 addi r8,r8,-8
1845 li r0,0
1846 mtctr r11 /* zero this many doublewords */
18473: stdu r0,8(r8)
1848 bdnz 3b
18494:
1850
1851 mfmsr r6
1852 ori r6,r6,MSR_RI
1853 mtmsrd r6 /* RI on */
1854
1855#ifdef CONFIG_HMT
1856 /* Start up the second thread on cpu 0 */
1857 mfspr r3,PVR
1858 srwi r3,r3,16
1859 cmpwi r3,0x34 /* Pulsar */
1860 beq 90f
1861 cmpwi r3,0x36 /* Icestar */
1862 beq 90f
1863 cmpwi r3,0x37 /* SStar */
1864 beq 90f
1865 b 91f /* HMT not supported */
186690: li r3,0
1867 bl .hmt_start_secondary
186891:
1869#endif
1870
1871 /* The following gets the stack and TOC set up with the regs */
1872 /* pointing to the real addr of the kernel stack. This is */
1873 /* all done to support the C function call below which sets */
1874 /* up the htab. This is done because we have relocated the */
1875 /* kernel but are still running in real mode. */
1876
1877 LOADADDR(r3,init_thread_union)
1878 sub r3,r3,r26
1879
1880 /* set up a stack pointer (physical address) */
1881 addi r1,r3,THREAD_SIZE
1882 li r0,0
1883 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1884
1885 /* set up the TOC (physical address) */
1886 LOADADDR(r2,__toc_start)
1887 addi r2,r2,0x4000
1888 addi r2,r2,0x4000
1889 sub r2,r2,r26
1890
1891 LOADADDR(r3,cpu_specs)
1892 sub r3,r3,r26
1893 LOADADDR(r4,cur_cpu_spec)
1894 sub r4,r4,r26
1895 mr r5,r26
1896 bl .identify_cpu
1897
1898 /* Save some low level config HIDs of CPU0 to be copied to
1899 * other CPUs later on, or used for suspend/resume
1900 */
1901 bl .__save_cpu_setup
1902 sync
1903
1904 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1905 * note that boot_cpuid can always be 0 nowadays since there is
1906 * nowhere it can be initialized differently before we reach this
1907 * code
1908 */
1909 LOADADDR(r27, boot_cpuid)
1910 sub r27,r27,r26
1911 lwz r27,0(r27)
1912
1913 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1914 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1915 add r13,r13,r24 /* for this processor. */
1916 sub r13,r13,r26 /* convert to physical addr */
1917 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */
1918
1919 /* Do very early kernel initializations, including initial hash table,
1920 * stab and slb setup before we turn on relocation. */
1921
1922 /* Restore parameters passed from prom_init/kexec */
1923 mr r3,r31
1924 bl .early_setup
1925
1926 /* set the ASR */
1927 ld r3,PACASTABREAL(r13)
1928 ori r4,r3,1 /* turn on valid bit */
1929 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1930 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1931 cmpldi r3,PLATFORM_PSERIES_LPAR
1932 bne 98f
1933 mfspr r3,PVR
1934 srwi r3,r3,16
1935 cmpwi r3,0x37 /* SStar */
1936 beq 97f
1937 cmpwi r3,0x36 /* IStar */
1938 beq 97f
1939 cmpwi r3,0x34 /* Pulsar */
1940 bne 98f
194197: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1942 HVSC /* Invoking hcall */
1943 b 99f
194498: /* !(rpa hypervisor) || !(star) */
1945 mtasr r4 /* set the stab location */
194699:
1947 /* Set SDR1 (hash table pointer) */
1948 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1949 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1950 /* Test if bit 0 is set (LPAR bit) */
1951 andi. r3,r3,0x1
1952 bne 98f
1953 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1954 sub r6,r6,r26
1955 ld r6,0(r6) /* get the value of _SDR1 */
1956 mtspr SDR1,r6 /* set the htab location */
195798:
1958 LOADADDR(r3,.start_here_common)
1959 SET_REG_TO_CONST(r4, MSR_KERNEL)
1960 mtspr SRR0,r3
1961 mtspr SRR1,r4
1962 rfid
1963 b . /* prevent speculative execution */
1964#endif /* CONFIG_PPC_MULTIPLATFORM */
1965
1966 /* This is where all platforms converge execution */
1967_STATIC(start_here_common)
1968 /* relocation is on at this point */
1969
1970 /* The following code sets up the SP and TOC now that we are */
1971 /* running with translation enabled. */
1972
1973 LOADADDR(r3,init_thread_union)
1974
1975 /* set up the stack */
1976 addi r1,r3,THREAD_SIZE
1977 li r0,0
1978 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1979
1980 /* Apply the CPU-specific fixups (nop out sections not relevant
1981 * to this CPU)
1982 */
1983 li r3,0
1984 bl .do_cpu_ftr_fixups
1985
1986 LOADADDR(r26, boot_cpuid)
1987 lwz r26,0(r26)
1988
1989 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1990 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1991 add r13,r13,r24 /* for this processor. */
1992 mtspr SPRG3,r13
1993
1994 /* ptr to current */
1995 LOADADDR(r4,init_task)
1996 std r4,PACACURRENT(r13)
1997
1998 /* Load the TOC */
1999 ld r2,PACATOC(r13)
2000 std r1,PACAKSAVE(r13)
2001
2002 bl .setup_system
2003
2004 /* Load up the kernel context */
20055:
2006#ifdef DO_SOFT_DISABLE
2007 li r5,0
2008 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
2009 mfmsr r5
2010 ori r5,r5,MSR_EE /* Hard Enabled */
2011 mtmsrd r5
2012#endif
2013
2014 bl .start_kernel
2015
2016_GLOBAL(__setup_cpu_power3)
2017 blr
2018
2019_GLOBAL(hmt_init)
2020#ifdef CONFIG_HMT
2021 LOADADDR(r5, hmt_thread_data)
2022 mfspr r7,PVR
2023 srwi r7,r7,16
2024 cmpwi r7,0x34 /* Pulsar */
2025 beq 90f
2026 cmpwi r7,0x36 /* Icestar */
2027 beq 91f
2028 cmpwi r7,0x37 /* SStar */
2029 beq 91f
2030 b 101f
203190: mfspr r6,PIR
2032 andi. r6,r6,0x1f
2033 b 92f
203491: mfspr r6,PIR
2035 andi. r6,r6,0x3ff
203692: sldi r4,r24,3
2037 stwx r6,r5,r4
2038 bl .hmt_start_secondary
2039 b 101f
2040
2041__hmt_secondary_hold:
2042 LOADADDR(r5, hmt_thread_data)
2043 clrldi r5,r5,4
2044 li r7,0
2045 mfspr r6,PIR
2046 mfspr r8,PVR
2047 srwi r8,r8,16
2048 cmpwi r8,0x34
2049 bne 93f
2050 andi. r6,r6,0x1f
2051 b 103f
205293: andi. r6,r6,0x3f
2053
2054103: lwzx r8,r5,r7
2055 cmpw r8,r6
2056 beq 104f
2057 addi r7,r7,8
2058 b 103b
2059
2060104: addi r7,r7,4
2061 lwzx r9,r5,r7
2062 mr r24,r9
2063101:
2064#endif
2065 mr r3,r24
2066 b .pSeries_secondary_smp_init
2067
2068#ifdef CONFIG_HMT
2069_GLOBAL(hmt_start_secondary)
2070 LOADADDR(r4,__hmt_secondary_hold)
2071 clrldi r4,r4,4
2072 mtspr NIADORM, r4
2073 mfspr r4, MSRDORM
2074 li r5, -65
2075 and r4, r4, r5
2076 mtspr MSRDORM, r4
2077 lis r4,0xffef
2078 ori r4,r4,0x7403
2079 mtspr TSC, r4
2080 li r4,0x1f4
2081 mtspr TST, r4
2082 mfspr r4, HID0
2083 ori r4, r4, 0x1
2084 mtspr HID0, r4
2085 mfspr r4, CTRLF
2086 oris r4, r4, 0x40
2087 mtspr CTRLT, r4
2088 blr
2089#endif
2090
2091#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
2092_GLOBAL(smp_release_cpus)
2093 /* All secondary cpus are spinning on a common
2094 * spinloop, release them all now so they can start
2095 * to spin on their individual paca spinloops.
2096 * For non SMP kernels, the secondary cpus never
2097 * get out of the common spinloop.
2098 */
2099 li r3,1
2100 LOADADDR(r5,__secondary_hold_spinloop)
2101 std r3,0(r5)
2102 sync
2103 blr
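/*
 * Seen from C, the handshake with __secondary_hold is just (sketch):
 *
 *	master:		*__secondary_hold_spinloop = 1; sync();
 *	secondary:	while (*__secondary_hold_spinloop != 1)
 *				;	// then branch off to secondary init
 */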
2104#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
2105
2106
2107/*
2108 * We put a few things here that have to be page-aligned.
2109 * This stuff goes at the beginning of the data segment,
2110 * which is page-aligned.
2111 */
2112 .data
2113 .align 12
2114 .globl sdata
2115sdata:
2116 .globl empty_zero_page
2117empty_zero_page:
2118 .space 4096
2119
2120 .globl swapper_pg_dir
2121swapper_pg_dir:
2122 .space 4096
2123
2124 .globl ioremap_dir
2125ioremap_dir:
2126 .space 4096
2127
2128#ifdef CONFIG_SMP
2129/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
2130 .globl stab_array
2131stab_array:
2132 .space 4096 * 48
2133#endif
2134
2135/*
2136 * This space gets a copy of optional info passed to us by the bootstrap.
2137 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
2138 */
2139 .globl cmd_line
2140cmd_line:
2141 .space COMMAND_LINE_SIZE