/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications by Dan Malek
 *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains low-level support and setup for PowerPC 8xx
 *  embedded processors, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
#define DO_8xx_CPU6(val, reg)	\
	li	reg, val;	\
	stw	reg, 12(r0);	\
	lwz	reg, 12(r0);
#else
#define DO_8xx_CPU6(val, reg)
#endif
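/* A note on DO_8xx_CPU6 (my reading of the workaround, not a manual quote):
 * parts affected by the CPU6 errata require that an mtspr to certain SPRs
 * be preceded by a dummy store and reload of a register-specific code word.
 * 'val' is that code and 'reg' is clobbered as scratch.  Since r0 used as a
 * base register reads as zero, 12(r0) here is absolute location 12; the
 * code near the end of the file uses offsets into cpu6_errata_word for the
 * same purpose.
 */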
	.section	.text.head, "ax"
_ENTRY(_stext);
_ENTRY(_start);

/* MPC8xx
 * This port was done on an MBX board with an 860.  Right now I only
 * support an ELF compressed (zImage) boot from EPPC-Bug because the
 * code there loads up some registers before calling us:
 *   r3: ptr to board info data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * I decided to use conditional compilation instead of checking PVR and
 * adding more processor specific branches around code I don't need.
 * Since this is an embedded processor, I also appreciate any memory
 * savings I can get.
 *
 * The MPC8xx does not have any BATs, but it supports large page sizes.
 * We first initialize the MMU to support 8M byte pages, then load one
 * entry into each of the instruction and data TLBs to map the first
 * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
 * the "internal" processor registers before MMU_init is called.
 *
 * The TLB code currently contains a major hack.  Since I use the condition
 * code register, I have to save and restore it.  I am out of registers, so
 * I just store it in memory location 0 (the TLB handlers are not reentrant).
 * To avoid making any decisions, I need to use the "segment" valid bit
 * in the first level table, but that would require many changes to the
 * Linux page directory/table functions that I don't want to do right now.
 *
 * I used to use SPRG2 for a temporary register in the TLB handler, but it
 * has since been put to other uses.  I now use a hack to save a register
 * and the CCR at memory location 0.....Someday I'll fix this.....
 *	-- Dan
 */
	.globl	__start
__start:
	mr	r31,r3			/* save parameters */
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 8 Meg mapped into TLB entries, and the caches
 * ready to work.
 */

turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	rfi				/* enables MMU */

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
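/* Orientation note (derived from the macros below, not from separate
 * documentation): EXCEPTION_PROLOG_1 picks the kernel stack to use -- the
 * current stack if we faulted in the kernel (cr0.eq set), otherwise the top
 * of the task's kernel stack located through SPRG3 -- and carves an
 * exception frame from it.  EXCEPTION_PROLOG_2 then saves the volatile
 * state (CR, LR, GPRs, old r1) into that frame and switches r1 to the new
 * kernel stack.
 */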
#define EXCEPTION_PROLOG	\
	mtspr	SPRN_SPRG0,r10;	\
	mtspr	SPRN_SPRG1,r11;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG3;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */


#define EXCEPTION_PROLOG_2	\
	CLR_TOP32(r11);		\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)		\
	. = n;					\
label:						\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	li	r10,MSR_KERNEL;					\
	copyee(r10, r9);					\
	bl	tfer;						\
i##n:								\
	.long	hdlr;						\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)
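/* How EXC_XFER_TEMPLATE hands off (my summary of the convention used by the
 * 32-bit entry code, not a definitive reference): the "bl tfer" leaves the
 * link register pointing at the two .long words that follow it, so the
 * transfer routine can pick up the C handler address and the return path
 * from there.  COPY_EE copies the external-interrupt-enable bit (MSR_EE,
 * bit 16 in big-endian bit numbering) from the saved SRR1 in r9 into the
 * MSR value the handler will run with; NOCOPY leaves interrupts disabled.
 */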

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
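/* Naming of the wrappers above, as I understand the common 32-bit code:
 * the _LITE variants pass n+1 as the trap value, i.e. they set the low bit
 * of _TRAP to record that only a partial register set was saved, and they
 * use the lighter transfer/return paths; the _EE variants additionally let
 * the handler run with the interrupted context's MSR_EE setting via COPY_EE.
 */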

/* System reset */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
	. = 0x200
MachineCheck:
	EXCEPTION_PROLOG
	mfspr r4,SPRN_DAR
	stw r4,_DAR(r11)
	mfspr r5,SPRN_DSISR
	stw r5,_DSISR(r11)
	addi r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x200, machine_check_exception)

/* Data access exception.
 * This is "never generated" by the MPC8xx.  We jump to it for other
 * translation errors.
 */
	. = 0x300
DataAccess:
	EXCEPTION_PROLOG
	mfspr	r10,SPRN_DSISR
	stw	r10,_DSISR(r11)
	mr	r5,r10
	mfspr	r4,SPRN_DAR
	EXC_XFER_EE_LITE(0x300, handle_page_fault)

/* Instruction access exception.
 * This is "never generated" by the MPC8xx.  We jump to it for other
 * translation errors.
 */
	. = 0x400
InstructionAccess:
	EXCEPTION_PROLOG
	mr	r4,r12
	mr	r5,r9
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* No FPU on MPC8xx.  This exception is not supposed to happen.
*/
	EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)

/* On the MPC8xx, this is a software emulation interrupt.  It occurs
 * for all unimplemented and illegal instructions.
 */
	EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)

	. = 0x1100
/*
 * For the MPC8xx, this is a software tablewalk to load the instruction
 * TLB.  It is modelled after the example in the Motorola manual.  The task
 * switch loads the M_TWB register with the pointer to the first level table.
 * If we discover there is no second level table (value is zero) or if there
 * is an invalid pte, we load that into the TLB, which causes another fault
 * into the TLB Error interrupt where we can handle such problems.
 * We have to use the MD_xxx registers for the tablewalk because the
 * equivalent MI_xxx registers only perform the attribute functions.
 */
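/* A rough C-like sketch of the walk the miss handlers below perform
 * (my reading of the code, with made-up helper names, not a literal
 * translation):
 *
 *	pgd = (ea & 0x80000000) ? swapper_pg_dir : current pgd (from M_TWB);
 *	l1  = pgd[l1_index(ea)];		// level 1 entry
 *	if (l1 != 0) {
 *		pte = *pte_addr(l1, ea);	// level 2 entry, found via MD_TWC
 *		pte |= _PAGE_ACCESSED;		// skipped for non-present pages
 *						// when CONFIG_SWAP is set
 *		*pte_addr(l1, ea) = pte;	// write the update back
 *	} else {
 *		pte = 0;			// retry will fault into TLB Error
 *	}
 *	MI_RPN/MD_RPN = fixup(pte);		// set bits 24-27, clear 28
 */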
InstructionTLBMiss:
#ifdef CONFIG_8xx_CPU6
	stw	r3, 8(r0)
#endif
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
	stw	r10, 0(r0)
	stw	r11, 4(r0)
	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
#ifdef CONFIG_8xx_CPU15
	addi	r11, r10, 0x1000
	tlbie	r11
	addi	r11, r10, -0x1000
	tlbie	r11
#endif
	DO_8xx_CPU6(0x3780, r3)
	mtspr	SPRN_MD_EPN, r10	/* Have to use MD_EPN for walk, MI_EPN can't */
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andi.	r11, r10, 0x0800	/* Address >= 0x80000000 */
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19
3:
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, don't try to find a pte */

	/* We have a pte table, so load the MI_TWC with the attributes
	 * for this "segment."
	 */
	ori	r11,r11,1		/* Set valid bit */
	DO_8xx_CPU6(0x2b80, r3)
	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
	lwz	r10, 0(r11)	/* Get the pte */

#ifdef CONFIG_SWAP
	/* do not set the _PAGE_ACCESSED bit of a non-present page */
	andi.	r11, r10, _PAGE_PRESENT
	beq	4f
	ori	r10, r10, _PAGE_ACCESSED
	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
	stw	r10, 0(r11)
4:
#else
	ori	r10, r10, _PAGE_ACCESSED
	stw	r10, 0(r11)
#endif

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
2:	li	r11, 0x00f0
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x2d80, r3)
	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */

	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	rfi

	. = 0x1200
DataStoreTLBMiss:
#ifdef CONFIG_8xx_CPU6
	stw	r3, 8(r0)
#endif
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
	stw	r10, 0(r0)
	stw	r11, 4(r0)
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andi.	r11, r10, 0x0800
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19
3:
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, don't try to find a pte */

	/* We have a pte table, so fetch the pte from the table.
	 */
	ori	r11, r11, 1	/* Set valid bit in physical L2 page */
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
	mfspr	r10, SPRN_MD_TWC	/* ....and get the pte address */
	lwz	r10, 0(r10)	/* Get the pte */

	/* Insert the Guarded flag into the TWC from the Linux PTE.
	 * It is bit 27 of both the Linux PTE and the TWC (at least
	 * I got that right :-).  It will be better when we can put
	 * this into the Linux pgd/pmd and load it in the operation
	 * above.
	 */
	rlwimi	r11, r10, 0, 27, 27
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11

#ifdef CONFIG_SWAP
	/* do not set the _PAGE_ACCESSED bit of a non-present page */
	andi.	r11, r10, _PAGE_PRESENT
	beq	4f
	ori	r10, r10, _PAGE_ACCESSED
4:
	/* and update pte in table */
#else
	ori	r10, r10, _PAGE_ACCESSED
#endif
	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
	stw	r10, 0(r11)

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
2:	li	r11, 0x00f0
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x3d80, r3)
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	rfi

/* This is an instruction TLB error on the MPC8xx.  This could be due
 * to many reasons, such as executing guarded memory or illegal instruction
 * addresses.  There is nothing to do but handle a big time error fault.
 */
	. = 0x1300
InstructionTLBError:
	b	InstructionAccess

/* This is the data TLB error on the MPC8xx.  This could be due to
 * many reasons, including a dirty update to a pte.  We can catch that
 * one here, but anything else is an error.  First, we track down the
 * Linux pte.  If it is valid and write access is allowed, but the
 * page dirty bit is not set, we set it and reload the TLB.  For
 * any other case, we bail out to a higher level function that can
 * handle it.
 */
	. = 0x1400
DataTLBError:
#ifdef CONFIG_8xx_CPU6
	stw	r3, 8(r0)
#endif
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
	stw	r10, 0(r0)
	stw	r11, 4(r0)

	/* First, make sure this was a store operation.
	*/
	mfspr	r10, SPRN_DSISR
	andis.	r11, r10, 0x0200	/* If set, indicates store op */
	beq	2f

	/* The EA of a data TLB miss is automatically stored in the MD_EPN
	 * register.  The EA of a data TLB error is automatically stored in
	 * the DAR, but not the MD_EPN register.  We must copy the 20 most
	 * significant bits of the EA from the DAR to MD_EPN before we
	 * start walking the page tables.  We also need to copy the CASID
	 * value from the M_CASID register.
	 * Addendum:  The EA of a data TLB error is _supposed_ to be stored
	 * in DAR, but it seems that this doesn't happen in some cases, such
	 * as when the error is due to a dcbi instruction to a page with a
	 * TLB that doesn't have the changed bit set.  In such cases, there
	 * does not appear to be any way to recover the EA of the error
	 * since it is neither in DAR nor MD_EPN.  As a workaround, the
	 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
	 * are initialized in mapin_ram().  This will avoid the problem,
	 * assuming we only use the dcbi instruction on kernel addresses.
	 */
	mfspr	r10, SPRN_DAR
	rlwinm	r11, r10, 0, 0, 19
	ori	r11, r11, MD_EVALID
	mfspr	r10, SPRN_M_CASID
	rlwimi	r11, r10, 0, 28, 31
	DO_8xx_CPU6(0x3780, r3)
	mtspr	SPRN_MD_EPN, r11

	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andi.	r11, r10, 0x0800
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19
3:
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, bail */

	/* We have a pte table, so fetch the pte from the table.
	 */
	ori	r11, r11, 1		/* Set valid bit in physical L2 page */
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11		/* Load pte table base address */
	mfspr	r11, SPRN_MD_TWC		/* ....and get the pte address */
	lwz	r10, 0(r11)		/* Get the pte */

	andi.	r11, r10, _PAGE_RW	/* Is it writeable? */
	beq	2f			/* Bail out if not */

	/* Update 'changed', among others.
	*/
#ifdef CONFIG_SWAP
	ori	r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
	/* do not set the _PAGE_ACCESSED bit of a non-present page */
	andi.	r11, r10, _PAGE_PRESENT
	beq	4f
	ori	r10, r10, _PAGE_ACCESSED
4:
#else
	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
#endif
	mfspr	r11, SPRN_MD_TWC		/* Get pte address again */
	stw	r10, 0(r11)		/* and update pte in table */

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	li	r11, 0x00f0
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x3d80, r3)
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	rfi
2:
	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	b	DataAccess

	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.
 */
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)

	. = 0x2000

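/* The MPC8xx has no hardware FPU (see the 0x800 vector above), so
 * giveup_fpu has nothing to save; it is kept as a no-op stub so that any
 * common code which calls it still links.
 */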
	.globl	giveup_fpu
giveup_fpu:
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
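	/* The stwu above both writes a NULL back-chain word and leaves r1
	 * pointing STACK_FRAME_OVERHEAD below the top of init_thread_union,
	 * i.e. it establishes the initial kernel stack frame.
	 */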

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 8xx, all we have to do is invalidate the TLB to clear
 * the old 8M byte TLB mappings and load the page table base register.
 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier......until someone changes init's static structures.
	 */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
	lis	r4, cpu6_errata_word@h
	ori	r4, r4, cpu6_errata_word@l
	li	r3, 0x3980
	stw	r3, 12(r4)
	lwz	r3, 12(r4)
#endif
	mtspr	SPRN_M_TWB, r6
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:
	SYNC			/* Force all PTE updates to finish */
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	tovirt(r6,r6)
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 * virtual to physical.  Also, set the cache mode since that is defined
 * by TLB entries and perform any additional mapping (like of the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 */
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
#ifdef CONFIG_PIN_TLB
	lis	r8, MI_RSV4I@h
	ori	r8, r8, 0x1c00
#else
	li	r8, 0
#endif
	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */

#ifdef CONFIG_PIN_TLB
	lis	r10, (MD_RSV4I | MD_RESETVAL)@h
	ori	r10, r10, 0x1c00
	mr	r8, r10
#else
	lis	r10, MD_RESETVAL@h
#endif
#ifndef CONFIG_8xx_COPYBACK
	oris	r10, r10, MD_WTDEF@h
#endif
	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */

	/* Now map the lower 8 Meg into the TLBs.  For this quick hack,
	 * we can load the instruction and data TLB registers with the
	 * same values.
	 */
	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MI_EPN, r8
	mtspr	SPRN_MD_EPN, r8
	li	r8, MI_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MI_TWC, r8
	mtspr	SPRN_MD_TWC, r8
	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
	mtspr	SPRN_MD_RPN, r8
	lis	r8, MI_Kp@h		/* Set the protection mode */
	mtspr	SPRN_MI_AP, r8
	mtspr	SPRN_MD_AP, r8

	/* Map another 8 MByte at the IMMR to get the processor
	 * internal registers (among other things).
	 */
#ifdef CONFIG_PIN_TLB
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10
#endif
	mfspr	r9, 638			/* Get current IMMR */
	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */

	mr	r8, r9			/* Create vaddr for TLB */
	ori	r8, r8, MD_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r8, MD_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MD_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r8
	mr	r8, r9			/* Create paddr for TLB */
	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
	mtspr	SPRN_MD_RPN, r8

#ifdef CONFIG_PIN_TLB
	/* Map two more 8M kernel data pages.
	*/
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10

	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	addis	r8, r8, 0x0080		/* Add 8M */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r9, MI_PS8MEG		/* Set 8M byte page */
	ori	r9, r9, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r9
	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11

	addis	r8, r8, 0x0080		/* Add 8M */
	mtspr	SPRN_MD_EPN, r8
	mtspr	SPRN_MD_TWC, r9
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11
#endif

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes....later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
	mtspr	SPRN_DC_CST, r8
#else
	/* For a debug option, I left this here to easily enable
	 * the write through cache mode
	 */
	lis	r8, DC_SFWT@h
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_DC_CST, r8
#endif
	blr


/*
 * Set up to use a given MMU context.
 * r3 is context number, r4 is PGD pointer.
 *
 * We load the physical address of the new task's page directory
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 */
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif

#ifdef CONFIG_8xx_CPU6
	lis	r6, cpu6_errata_word@h
	ori	r6, r6, cpu6_errata_word@l
	tophys	(r4, r4)
	li	r7, 0x3980
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_TWB, r4		/* Update MMU base address */
	li	r7, 0x3380
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_CASID, r3	/* Update context */
#else
	mtspr	SPRN_M_CASID,r3		/* Update context */
	tophys	(r4, r4)
	mtspr	SPRN_M_TWB, r4		/* and pgd */
#endif
	SYNC
	blr

#ifdef CONFIG_8xx_CPU6
/* It's here because it is unique to the 8xx.
 * It is important we get called with interrupts disabled.  I used to
 * do that, but it appears that all code that calls this already had
 * interrupts disabled.
 */
	.globl	set_dec_cpu6
set_dec_cpu6:
	lis	r7, cpu6_errata_word@h
	ori	r7, r7, cpu6_errata_word@l
	li	r4, 0x2c00
	stw	r4, 8(r7)
	lwz	r4, 8(r7)
	mtspr	22, r3		/* Update Decrementer */
	SYNC
	blr
#endif

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/* Room for two PTE table pointers, usually the kernel and current user
 * pointers to their respective root page tables (pgdir).
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_8xx_CPU6
	.globl	cpu6_errata_word
cpu6_errata_word:
	.space	16
#endif