/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>

	.text

#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_irq)
	mflr	r0
	stw	r0,4(r1)
	mtctr	r6
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
	mr	r1,r5
	bctrl
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr
#endif /* CONFIG_IRQSTACKS */

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
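/*
 * A C model of the computation, for illustration only (unsigned
 * __int128 is a compiler extension, used here purely as notation;
 * the code below builds the same result from 32x32->64 partial
 * products, with each 64-bit operand arriving as a high/low
 * register pair, A in r3/r4 and B in r5/r6):
 *
 *	u64 mulhdu(u64 a, u64 b)
 *	{
 *		return (u64)(((unsigned __int128)a * b) >> 64);
 *	}
 */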
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
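/*
 * Conceptually (a sketch; reloc_offset() is the difference between the
 * address the kernel is executing at and the address it was linked at):
 *
 *	unsigned long sub_reloc_offset(unsigned long x)
 *	{
 *		return x - reloc_offset();
 *	}
 *
 * The bl/mflr sequence below measures that offset directly, by
 * comparing the runtime address of label 1 with its link-time address.
 */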
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
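/*
 * A rough C model (sketch only; __got2_start and __got2_end are the
 * linker symbols bounding the section):
 *
 *	void reloc_got2(unsigned long offset)
 *	{
 *		unsigned long *p = (unsigned long *)__got2_start;
 *		unsigned long *end = (unsigned long *)__got2_end;
 *
 *		while (p < end)
 *			*p++ += offset;
 *	}
 *
 * The assembly must additionally relocate the section bounds themselves
 * (the bl/mflr trick), since it can run before the kernel is relocated.
 */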
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
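/*
 * In C terms this is roughly (a sketch; cpu_setup is the struct
 * cpu_spec member that CPU_SPEC_SETUP indexes, and "+ offset"
 * relocates each pointer by the data offset in r3, since this may run
 * before relocation is final):
 *
 *	void call_setup_cpu(unsigned long offset)
 *	{
 *		struct cpu_spec *spec;
 *
 *		spec = *(struct cpu_spec **)((char *)&cur_cpu_spec + offset);
 *		spec = (struct cpu_spec *)((char *)spec + offset);
 *		if (spec->cpu_setup)
 *			((void (*)(unsigned long, struct cpu_spec *))
 *			 ((char *)spec->cpu_setup + offset))(offset, spec);
 *	}
 */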
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This is called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu.c).
 */
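/*
 * The HID1 update below amounts to the following (a sketch; PS is HID1
 * bit 15 in IBM numbering, i.e. mask 0x00010000):
 *
 *	hid1 = (mfspr(SPRN_HID1) & ~0x00010000) | (pll ? 0x00010000 : 0);
 *	mtspr(SPRN_HID1, hid1);
 *
 * performed with external interrupts disabled, and with HID0:BTIC
 * cleared while running from PLL1.
 */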
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* Clear HID1:PS from the value read (rlwimi could merge these) */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,(31-THREAD_SHIFT)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * Clear the MSR bits set in nmask, then OR in some values:
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
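/*
 * Equivalent C (a sketch; mfmsr()/mtmsr() stand for the instructions
 * of the same names):
 *
 *	void _nmask_and_or_msr(unsigned long nmask, unsigned long value_to_or)
 *	{
 *		mtmsr((mfmsr() & ~nmask) | value_to_or);
 *	}
 */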
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
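/*
 * In outline (a sketch; mfmsr()/mtmsr() stand for the corresponding
 * instructions):
 *
 *	u8 real_readb(volatile u8 *paddr)
 *	{
 *		unsigned long msr = mfmsr();
 *
 *		mtmsr(msr & ~MSR_DR);	- data translation off
 *		u8 val = *paddr;	- access by physical address
 *		mtmsr(msr);		- translation back on
 *		return val;
 *	}
 *
 * real_writeb() below is the same with a store in place of the load.
 */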
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */


/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
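/*
 * The two loops below implement, in effect (a sketch; dcbst()/icbi()
 * stand for the cache instructions applied to one block):
 *
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		dcbst(p);	- push modified data out to memory
 *	sync();			- wait for the stores to reach RAM
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		icbi(p);	- invalidate the now-stale icache blocks
 *	sync(); isync();
 */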
_KPROBE(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
#ifndef CONFIG_44x
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
#else
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache.  Sigh. */
	iccci	0, r0
#endif
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifndef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache,
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space.
	 */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
#endif /* CONFIG_44x */
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order)
 */
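/*
 * Behaviourally (a sketch; dcbz() stands for the instruction, which
 * zeroes a whole cache block without first reading it from memory):
 *
 *	void clear_pages(void *page, int order)
 *	{
 *		unsigned long n = (PAGE_SIZE / L1_CACHE_BYTES) << order;
 *		char *p = page;
 *
 *		while (n--) {
 *			dcbz(p);
 *			p += L1_CACHE_BYTES;
 *		}
 *	}
 *
 * The 8xx path below uses plain word stores instead of dcbz.
 */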
_GLOBAL(clear_pages)
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
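/*
 * Per cache block, the non-8xx path amounts to (a sketch):
 *
 *	dcbt(src + lookahead);	- prefetch a source block ahead of the copy
 *	dcbz(dst);		- allocate the dest block without reading RAM
 *	copy L1_CACHE_BYTES from src to dst, 16 bytes per COPY_16_BYTES
 *
 * so each destination line is established in the cache by dcbz rather
 * than fetched from memory.
 */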
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr)
 */
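/*
 * Both are standard lwarx/stwcx. read-modify-write loops; in rough C
 * (a sketch, with lwarx()/stwcx() standing for the reservation pair):
 *
 *	void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = lwarx(addr);
 *		} while (!stwcx(addr, old & ~mask));
 *	}
 *
 * atomic_set_mask() is identical with "old | mask".
 */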
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
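/*
 * For example, __lshrdi3 computes, for a count in [0, 63] (a sketch,
 * with MSW in r3 and LSW in r4):
 *
 *	LSW = (count > 31 ? 0 : LSW >> count)
 *	    | (count > 31 ? 0 : MSW << (32 - count))
 *	    | (count < 32 ? 0 : MSW >> (count - 32));
 *	MSW =  count > 31 ? 0 : MSW >> count;
 *
 * This relies on srw/slw producing 0 for shift amounts of 32-63 (and
 * sraw producing sign bits), where C would leave the result undefined.
 */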
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
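/*
 * Equivalent C (a sketch; on 32-bit PPC, a arrives in r3/r4 and b in
 * r5/r6, high word first):
 *
 *	int __ucmpdi2(unsigned long long a, unsigned long long b)
 *	{
 *		return a < b ? 0 : (a == b ? 1 : 2);
 *	}
 */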
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
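/*
 * In effect this wraps the clone system call; a rough C model (sketch
 * only, ignoring the saved-register bookkeeping):
 *
 *	int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *	{
 *		long ret = sys_clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *
 *		if (ret == 0)			- child
 *			sys_exit(fn(arg));	- run fn, exit with its result
 *		return ret;			- parent: pid or -errno
 *	}
 */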
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	bns+	1f		/* did system call indicate error? */
	neg	r3,r3		/* if so, make return code negative */
1:	cmpwi	0,r3,0		/* parent or child? */
	bne	2f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
2:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
/*
 * Must be relocatable PIC code callable as a C function.
 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */
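	/*
	 * The copy loop below walks the kexec indirection list; in rough
	 * C it is (a sketch; the IND_* flags come from <linux/kexec.h>,
	 * and page_list itself is treated as the first entry):
	 *
	 *	while (!((entry = next word) & IND_DONE)) {
	 *		if (entry & IND_DESTINATION)
	 *			dst = entry & PAGE_MASK;
	 *		else if (entry & IND_INDIRECTION)
	 *			list = (unsigned long *)(entry & PAGE_MASK);
	 *		else if (entry & IND_SOURCE) {
	 *			copy_page(dst, (void *)(entry & PAGE_MASK));
	 *			dst += PAGE_SIZE;
	 *		}
	 *	}
	 *
	 * with each destination word pushed to memory (dcbst) and its
	 * icache line invalidated (icbi) as it is stored.
	 */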

	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9) /* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

3:
	/* To be certain of avoiding problems with self-modifying code,
	 * execute a serializing instruction here.
	 */
	isync
	sync

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif