/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>

	.text

#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_irq)
	mflr	r0
	stw	r0,4(r1)
	mtctr	r6
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
	mr	r1,r5
	bctrl
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr
#endif /* CONFIG_IRQSTACKS */

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
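/*
 * The operands arrive as register pairs, a = (r3:r4) and b = (r5:r6),
 * high word first, and the high 64 bits of the product are returned
 * the same way.  Writing a = ah:al and b = bh:bl, the schoolbook
 * decomposition computed below is roughly:
 *
 *	result = ah*bh + high(ah*bl) + high(al*bh)
 *	         + carry out of (low(ah*bl) + low(al*bh) + high(al*bl))
 *
 * The two cmpwi/beq tests just skip partial products that are known
 * to be zero.
 */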
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
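/*
 * The offset is computed with the classic position-independent idiom:
 * "bl 1f" deposits the run-time address of label 1 in LR, while the
 * lis/addi pair materialises the address the linker gave that label.
 * Their difference is how far the kernel is running from where it
 * was linked, which is then subtracted from the argument.
 */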
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
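/*
 * r3 holds the offset to add.  The entry count is derived from the
 * __got2_start/__got2_end linker symbols (4 bytes per entry), and
 * the same bl/mflr trick as above relocates the section's own start
 * address before the loop walks it.
 */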
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
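/*
 * In rough C terms (with all pointers adjusted by the data offset in
 * r3, so this also works before the kernel is relocated):
 *
 *	if (cur_cpu_spec->cpu_setup)
 *		cur_cpu_spec->cpu_setup(offset, cur_cpu_spec);
 *
 * CPU_SPEC_SETUP is the asm-offsets.h offset of the setup function
 * pointer within struct cpu_spec.
 */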
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to
 * some other place (as should most of the cpufreq code in
 * via-pmu.c).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from the value read */
	or	r4,r4,r5	/* (a single rlwimi could do these two steps) */
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,(31-THREAD_SHIFT)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
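/*
 * Equivalently: msr = (msr & ~nmask) | value_to_or, i.e. the bits
 * set in the first argument are cleared, then the bits set in the
 * second argument are set.
 */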
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
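/*
 * Both helpers below briefly clear MSR:DR (the ori/xori pair turns
 * the bit off regardless of its prior state), so the single byte
 * access is performed with data translation disabled, i.e. on the
 * physical address passed in.  The sync/isync sequences around each
 * mtmsr keep the translation switch ordered with the access.
 */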
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li      r3, 512
	mtctr   r3
	lis     r4, KERNELBASE@h
1:	iccci   0, r4
	addi    r4, r4, 16
	bdnz    1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr   r3,SPRN_L1CSR0
	ori     r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr   SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
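/*
 * The pattern used here (and in the dcache routines below) is:
 * round start down to a cache line boundary, compute the line
 * count, then run one dcbst pass to push dirty data to memory,
 * sync, one icbi pass to invalidate the stale instruction lines,
 * and a final sync/isync before any of the new code can be fetched.
 */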
_KPROBE(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifndef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space
	 */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
#endif /* CONFIG_44x */
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
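/*
 * dcbz establishes a zeroed cache block for its target address
 * without fetching the old contents from memory, so clearing
 * 2^order pages this way costs no read bandwidth.  The 8xx
 * variant avoids dcbz and zeroes each cache line with plain
 * word stores instead.
 */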
_GLOBAL(clear_pages)
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
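/*
 * The main loop below keeps two cache operations in flight per
 * destination line: dcbt touches (prefetches) a source line
 * MAX_COPY_PREFETCH lines ahead, and dcbz establishes the
 * destination line so the stores never trigger a read of the old
 * destination data.  The cr0:eq flag selects one final pass over
 * the last MAX_COPY_PREFETCH lines, whose dcbt no longer reaches
 * past the end of the source page.
 */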
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
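/*
 * These rely on a 32-bit PowerPC property: srw/slw take a 6-bit
 * shift amount and yield 0 for counts 32-63 (sraw yields all sign
 * bits instead).  So for, e.g., __lshrdi3 the three terms in
 *
 *	LSW = (LSW >> c) | (MSW << (32 - c)) | (MSW >> (c - 32))
 *
 * can all be computed unconditionally: for any given count c, the
 * two terms that don't apply evaluate to zero and OR in harmlessly.
 */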
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
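/*
 * This is the libgcc helper that gcc emits calls to for unsigned
 * 64-bit comparisons on 32-bit targets: compare the high words
 * first and fall back to the low words only on equality.
 */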
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
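/*
 * Roughly equivalent to:
 *
 *	pid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *	if (pid == 0) {
 *		fn(arg);		(child)
 *		exit(0);
 *	}
 *	return pid;			(parent, or -errno on failure)
 *
 * except that clone and exit are invoked directly with the sc
 * instruction, and fn/arg are kept across the syscall in the
 * non-volatile registers r30/r31.
 */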
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	bns+	1f		/* did system call indicate error? */
	neg	r3,r3		/* if so, make return code negative */
1:	cmpwi	0,r3,0		/* parent or child? */
	bne	2f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
2:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */
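	/*
	 * page_list points at a chain of "indirection pages": each
	 * 32-bit entry is a page-aligned address with a kexec flag in
	 * its low bits - IND_DESTINATION sets the copy destination,
	 * IND_INDIRECTION switches to the next indirection page,
	 * IND_SOURCE names a page to copy there, and IND_DONE ends
	 * the walk.  The loop below decodes exactly these four tags.
	 */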

	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori     r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word from the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr   r7
	subi    r9, r9, 4
	subi    r8, r8, 4
9:
	lwzu    r0, 4(r9)  /* do the copy */
	xor	r6, r6, r0
	stwu    r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz    9b

	addi    r9, r9, 4
	addi    r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif