/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>

	.text

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr
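
/*
 * For reference, a C model of the computation above (a sketch, not part
 * of the original file; it assumes the 32-bit ABI passes the 64-bit
 * operands as register pairs, A in r3:r4 and B in r5:r6, high word
 * first, with the result returned in r3:r4):
 *
 *	unsigned long long mulhdu_c(unsigned int ah, unsigned int al,
 *				    unsigned int bh, unsigned int bl)
 *	{
 *		unsigned long long albl = (unsigned long long)al * bl;
 *		unsigned long long albh = (unsigned long long)al * bh;
 *		unsigned long long ahbl = (unsigned long long)ah * bl;
 *		unsigned long long ahbh = (unsigned long long)ah * bh;
 *		unsigned long long carry = ((albl >> 32) +
 *			(unsigned int)albh + (unsigned int)ahbl) >> 32;
 *
 *		return ahbh + (albh >> 32) + (ahbl >> 32) + carry;
 *	}
 *
 * The assembly also skips the cross products when either high word is
 * zero, which the C model does not bother to do.
 */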

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr
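
/*
 * In C terms (an illustrative sketch, not from the original source;
 * __got2_start/__got2_end are the linker symbols used above, treated
 * here as a word array whose run-time address has already been fixed
 * up the way the bl/mflr sequence computes it):
 *
 *	void reloc_got2_c(unsigned long offset)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __got2_start; p < __got2_end; p++)
 *			*p += offset;
 *	}
 */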

/*
 * identify_cpu
 * called with r3 = data offset and r4 = CPU number;
 * doesn't change r3
 */
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5
	beq	1f
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	sub	r8,r8,r3
	stw	r8,0(r6)
	blr
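
/*
 * Equivalent C (a sketch only; the field names mirror the asm-offsets
 * constants used above, and the scan assumes, as the asm does, that
 * some entry in cpu_specs always matches the PVR):
 *
 *	void identify_cpu_c(unsigned long offset)
 *	{
 *		struct cpu_spec *s = cpu_specs;
 *		unsigned int pvr = mfpvr();
 *
 *		while ((pvr & s->pvr_mask) != s->pvr_value)
 *			s++;
 *		cur_cpu_spec = (struct cpu_spec *)((char *)s - offset);
 *	}
 *
 * The subtraction of the data offset stores a link-time address, since
 * this runs before the kernel has been relocated to its linked address.
 */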

/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nops over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3
	lwz	r4,CPU_SPEC_FEATURES(r4)

	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l

	/* Do the fixup */
1:	cmplw	0,r6,r7
	bgelr
	addi	r6,r6,16
	lwz	r8,-16(r6)	/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)	/* value */
	cmplw	0,r8,r9
	beq	1b
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	add	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b
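
/*
 * The fixup records are 16 bytes each; in C the walk looks roughly like
 * this (a sketch; the struct layout is implied by the -16/-12/-8/-4
 * offsets used above, and patch_nop() stands for the stw plus the
 * conditional dcbst/icbi cache-flush sequence):
 *
 *	struct ftr_fixup {
 *		unsigned long mask, value;
 *		unsigned int *start, *end;	// link-time addresses
 *	};
 *
 *	void do_cpu_ftr_fixups_c(unsigned long offset)
 *	{
 *		unsigned long features = cur_cpu_spec->cpu_features;
 *		struct ftr_fixup *f;
 *
 *		for (f = __start___ftr_fixup; f < __stop___ftr_fixup; f++) {
 *			unsigned int *p;
 *
 *			if ((features & f->mask) == f->value)
 *				continue;	// feature present: keep code
 *			for (p = f->start; p < f->end; p++)
 *				patch_nop((unsigned int *)((char *)p + offset));
 *		}
 *	}
 */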

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu.c).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* Clear HID1:PS from the value read; could I have used rlwimi here? */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,18
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * Complement the mask on the MSR, then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
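
/*
 * In C, the effect is simply (a sketch; mfmsr()/mtmsr() stand for the
 * instructions, and the barriers are elided):
 *
 *	void _nmask_and_or_msr_c(unsigned long nmask, unsigned long value)
 *	{
 *		mtmsr((mfmsr() & ~nmask) | value);
 *	}
 */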


/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID	/* Get PID */
	rlwimi	r4,r5,0,24,31	/* Set TID */
	mtspr	SPRN_MMUCR,r4

	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
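
/*
 * A C model of the flush above (a sketch; dcbst(), icbi(), sync() and
 * isync() stand in for the instructions of the same name):
 *
 *	void flush_icache_range_c(unsigned long start, unsigned long stop)
 *	{
 *		unsigned long p;
 *
 *		start &= ~(L1_CACHE_BYTES - 1);
 *		for (p = start; p < stop; p += L1_CACHE_BYTES)
 *			dcbst(p);	// push modified data to memory
 *		sync();			// wait for the stores to complete
 *		for (p = start; p < stop; p += L1_CACHE_BYTES)
 *			icbi(p);	// invalidate stale icache lines
 *		sync();
 *		isync();		// discard prefetched instructions
 *	}
 */
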
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601, which has a unified cache.
 *
 * void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 * void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr
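
/*
 * Roughly, in C (a sketch; dcbz() stands for the instruction, and the
 * page is assumed cacheable as the comment above requires):
 *
 *	void clear_pages_c(void *page, int order)
 *	{
 *		unsigned int nlines = (4096 / L1_CACHE_BYTES) << order;
 *		char *p = page;
 *
 *		while (nlines--) {
 *			dcbz(p);	// zero a full line with no memory read
 *			p += L1_CACHE_BYTES;
 *		}
 *	}
 */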

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif /* CONFIG_8xx */

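/*
 * The non-8xx path in outline (a loose sketch, not a faithful
 * translation: dcbt()/dcbz() stand for the instructions, and the real
 * code software-pipelines MAX_COPY_PREFETCH lines of prefetch ahead of
 * the copy, using a condition-register flag to run one final drain
 * pass without prefetching past the page):
 *
 *	void copy_page_c(void *dst, void *src)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4096; i += L1_CACHE_BYTES) {
 *			dcbt((char *)src + i + L1_CACHE_BYTES); // prefetch next src line
 *			dcbz((char *)dst + i);	// claim dst line without reading it
 *			memcpy((char *)dst + i, (char *)src + i, L1_CACHE_BYTES);
 *		}
 *	}
 */
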
/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
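
/*
 * These are load-reserve/store-conditional (lwarx/stwcx.) retry loops:
 * the stwcx. fails and the loop retries whenever another CPU touched
 * the reservation granule between the lwarx and the stwcx.  With GCC's
 * generic atomics the same effect is (a sketch, relaxed ordering,
 * matching the absence of barriers above):
 *
 *	void atomic_clear_mask_c(unsigned int mask, unsigned int *addr)
 *	{
 *		__atomic_and_fetch(addr, ~mask, __ATOMIC_RELAXED);
 *	}
 *
 *	void atomic_set_mask_c(unsigned int mask, unsigned int *addr)
 *	{
 *		__atomic_or_fetch(addr, mask, __ATOMIC_RELAXED);
 *	}
 */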

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
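
/*
 * In C, __lshrdi3 on a two-word value is (a sketch with explicit
 * branches; the branch-free asm above instead computes both the
 * count<32 and count>=32 cases and ORs them together, relying on PPC
 * srw/slw yielding 0 for shift amounts of 32..63):
 *
 *	unsigned long long lshrdi3_c(unsigned long long x, int count)
 *	{
 *		unsigned int msw = x >> 32, lsw = (unsigned int)x;
 *
 *		if (count == 0)
 *			return x;
 *		if (count < 32) {
 *			lsw = (lsw >> count) | (msw << (32 - count));
 *			msw >>= count;
 *		} else {
 *			lsw = msw >> (count - 32);
 *			msw = 0;
 *		}
 *		return ((unsigned long long)msw << 32) | lsw;
 *	}
 */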

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr
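
/*
 * Branch-free absolute value: r4 = r3 >> 31 (arithmetic) is 0 for
 * non-negative r3 and ~0 for negative r3, so (r3 ^ r4) - r4 equals
 * either r3 or -r3.  In C (a sketch; like the asm, INT_MIN maps to
 * itself):
 *
 *	int abs_c(int x)
 *	{
 *		int s = x >> 31;	// 0 or -1
 *		return (x ^ s) - s;
 *	}
 */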

_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr

/*
 * Create a kernel thread
 * kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
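
/*
 * In C terms (a sketch; clone/exit here are the raw syscalls invoked
 * by the sc instructions above, not the libc wrappers, and no error
 * checking is done, matching the asm):
 *
 *	int kernel_thread_c(int (*fn)(void *), void *arg,
 *			    unsigned long flags)
 *	{
 *		long pid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *
 *		if (pid != 0)	// parent: return child pid (or error)
 *			return pid;
 *		fn(arg);	// child: run fn...
 *		exit(0);	// ...then exit, ignoring its return value
 *	}
 */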

_GLOBAL(execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */

	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

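/*
 * The walk above follows the kexec indirection-page format; roughly,
 * in C (a sketch; IND_* flag values as in the comments above, and
 * copy_page_flush() stands for the lwzu/stwu/dcbst/icbi loop):
 *
 *	unsigned long e = page_list;	// first entry arrives in r3
 *	unsigned long *entry = NULL, dst = 0;
 *
 *	while (!(e & IND_DONE)) {
 *		unsigned long page = e & ~(PAGE_SIZE - 1);
 *
 *		if (e & IND_DESTINATION)	// 1<<0
 *			dst = page;
 *		else if (e & IND_INDIRECTION)	// 1<<1
 *			entry = (unsigned long *)page;
 *		else if (e & IND_SOURCE) {	// 1<<3
 *			copy_page_flush(dst, page);
 *			dst += PAGE_SIZE;
 *		}
 *		e = *entry++;
 *	}
 */
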
3:

	/* To be certain of avoiding problems with self-modifying code,
	 * execute a serializing instruction here.
	 */
	isync
	sync

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif