/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>

	.text

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
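/*
 * Illustrative note (a sketch, not from the original file): in C terms this
 * behaves roughly like
 *
 *	u64 mulhdu(u64 a, u64 b);	returns the upper 64 bits of a * b
 *
 * built from 32x32->64 partial products, since the 32-bit core has no
 * 64x64 multiply.  Per the 32-bit ABI the arguments arrive in r3/r4 and
 * r5/r6 (high word first) and the result is returned in r3/r4.
 */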
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
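/*
 * Illustrative sketch (not from the original file): with the offset passed
 * in r3, the loop below is roughly equivalent to
 *
 *	unsigned long *p;
 *	for (p = __got2_start; p < __got2_end; p++)
 *		*p += offset;
 *
 * except that the table is addressed via its current run-time location
 * (found with the bl/mflr trick) rather than its link-time address, so it
 * works before the kernel has been relocated.
 */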
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
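/*
 * Illustrative sketch (not from the original file): ignoring the data-offset
 * relocation applied to each pointer, this is roughly
 *
 *	if (cur_cpu_spec->cpu_setup)
 *		cur_cpu_spec->cpu_setup(offset, cur_cpu_spec);
 *
 * The call is made as a tail call through bctr, so the setup function
 * returns directly to our caller.
 */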
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* Clear HID1:PS from the value read (rlwimi would also work) */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,18
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * Complement the mask on the MSR, then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 */
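/*
 * Illustrative use (not from the original file), assuming the usual MSR bit
 * definitions: the new MSR value is (old_msr & ~nmask) | value_to_or, so
 *
 *	_nmask_and_or_msr(MSR_EE, 0);	clears MSR:EE (disables interrupts)
 *	_nmask_and_or_msr(0, MSR_EE);	sets MSR:EE (enables interrupts)
 */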
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */


/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	/* We run the search with interrupts disabled because we have to change
	 * the PID and I don't want to preempt when that happens.
	 */
	mfmsr	r5
	mfspr	r6,SPRN_PID
	wrteei	0
	mtspr	SPRN_PID,r4
	tlbsx.	r3, 0, r3
	mtspr	SPRN_PID,r6
	wrtee	r5
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:

#elif defined(CONFIG_44x)
	mfspr	r5,SPRN_MMUCR
	rlwimi	r5,r4,0,24,31		/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check).  Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	mfmsr	r4
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6,r4,r6
	mtmsr	r6
	mtspr	SPRN_MMUCR,r5
	tlbsx.	r3, 0, r3
	mtmsr	r4
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
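/*
 * Illustrative note (not from the original file): callers use this after
 * storing instructions, e.g. when loading a module or patching code:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * The loop below writes back each data cache line (dcbst), waits for the
 * stores to reach memory, then invalidates the matching instruction cache
 * lines (icbi).
 */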
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
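/*
 * Illustrative note (not from the original file): the generic code calls
 * this (typically via flush_dcache_icache_page()) when a page that may be
 * executed was written through the kernel mapping, for example after
 * copying in text for a user process.  The page-sized loop below pushes
 * the data cache lines out with dcbst and then invalidates the matching
 * instruction cache lines with icbi.
 */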
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,19			/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES		/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifndef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space
	 */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
#endif /* CONFIG_44x */
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19			/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES		/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order) ;
 */
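/*
 * Illustrative note (not from the original file): r3 is the page address and
 * r4 the allocation order, so the loop zeroes 4096 << order bytes, one cache
 * line per dcbz.  E.g. clear_pages(page_address(page), 0) clears a single
 * 4 KiB page without first reading it into the cache.
 */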
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
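/*
 * Illustrative note (not from the original file): COPY_16_BYTES moves one
 * 16-byte chunk using four word loads and four word stores with update, so
 * each loop iteration below copies exactly one L1 cache line.  On non-8xx
 * parts the source line is prefetched (dcbt) and the destination line is
 * zeroed (dcbz) first, so the destination is never read from memory.
 */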
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
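/*
 * Illustrative sketch (not from the original file): each routine is the
 * usual lwarx/stwcx. retry loop.  In C-like pseudocode:
 *
 *	do
 *		old = load_reserved(addr);			(lwarx)
 *	while (!store_conditional(addr, old & ~mask));		(stwcx.)
 *
 * atomic_set_mask is identical but stores (old | mask).  The conditional
 * store fails, and the loop retries, if another CPU touched the word
 * between the load and the store.
 */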
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
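/*
 * Worked example (illustrative, not from the original file), for __lshrdi3
 * with count < 32:  result.MSW = MSW >> count, and
 * result.LSW = (LSW >> count) | (MSW << (32 - count)), i.e. the bits that
 * fall out of the MSW are shifted into the top of the LSW.  For counts of
 * 32 to 63 the (count - 32) terms take over and the other shifts degenerate
 * to zero, which is why the code is valid for any count from 0 to 63.
 */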
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
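/*
 * Illustrative note (not from the original file): this issues a clone()
 * system call with CLONE_VM | CLONE_UNTRACED or'd into the caller's flags.
 * In the child it builds a top-level stack frame, calls fn(arg), and exits
 * with status 0 if fn ever returns; the parent simply returns the value
 * clone() gave it (the child's pid, or a negative error).
 */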
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr

_GLOBAL(kernel_execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_CODE_SIZE - 8	/* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif
914#endif