/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011, IBM Corporation
 *	Author: Suzuki Poulose <suzuki@in.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

	.text

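/*
 * Helpers that run C code on a different stack: the new stack base is
 * passed in r3 (call_do_softirq) or r5 (call_handle_irq, handler in r6).
 * The old r1 is saved in the new stack's back chain and restored from
 * there on return.
 */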
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_irq)
	mflr	r0
	stw	r0,4(r1)
	mtctr	r6
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
	mr	r1,r5
	bctrl
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
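/*
 * Register usage (inferred from the code and the 32-bit PowerPC ABI,
 * which passes a u64 with the high word in the lower-numbered register):
 * first operand in r3 (high)/r4 (low), second in r5 (high)/r6 (low),
 * high 64 bits of the product returned in r3/r4.
 */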
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
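	/* The bl/mflr pair below yields the runtime address of label 1b;
	 * subtracting its link-time address gives the displacement, which
	 * converts the link-time __got2_start in r7 into a usable runtime
	 * pointer before the fixup loop. */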
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from the value read
				   (rlwimi could have merged these steps) */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	CURRENT_THREAD_INFO(r6, r1)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * Clear the MSR bits that are set in nmask, then OR in some values:
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
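/*
 * The "ori; xori" pair below leaves MSR_DR cleared in r0, so the access
 * is performed with data translation off and r3 (r4 for the write) is
 * treated as a physical address.
 */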
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
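/*
 * The prologue shared by the *_range routines below rounds the
 * half-open [start, stop) interval out to whole cache lines: start is
 * aligned down, the byte count is rounded up, and srwi. converts it to
 * a line count for the ctr loop (beqlr returns early on a zero count).
 */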
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	isync
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
#ifndef CONFIG_44x
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
#else
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache.  Sigh. */
	iccci	0, r0
#endif
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifdef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space.
	 */
BEGIN_MMU_FTR_SECTION
	blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

#ifndef CONFIG_BOOKE
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr
#endif /* CONFIG_BOOKE */

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
1:	dcbz	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
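/*
 * The copy loop below keeps prefetch in flight: dcbt touches the source
 * a little ahead of the copy (r11 bytes) while dcbz allocates each
 * destination line without reading it back from memory.
 */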
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
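/*
 * lwarx/stwcx. form a load-reserve/store-conditional pair: the store
 * fails if the reservation was lost in the meantime, and bne- retries.
 * PPC405_ERR77 is a workaround for a 405 erratum, a no-op elsewhere.
 */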
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr

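/*
 * __bswapdi2: byte-reverse the 64-bit value in r3 (high)/r4 (low).
 * Each word is byte-reversed with a rotlwi/rlwimi sequence, and the
 * two words are swapped so the result again sits in r3/r4.
 */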
_GLOBAL(__bswapdi2)
	rotlwi	r9,r4,8
	rotlwi	r10,r3,8
	rlwimi	r9,r4,24,0,7
	rlwimi	r10,r3,24,0,7
	rlwimi	r9,r4,24,16,23
	rlwimi	r10,r3,24,16,23
	mr	r3,r9
	mr	r4,r10
	blr

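/*
 * Branch-free absolute value: srawi materializes the sign of r3 as an
 * all-ones or all-zeroes mask, and xor+sub conditionally negates it.
 */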
_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
	/* Reset stack */
	CURRENT_THREAD_INFO(r1, r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)		/* Zero the stack frame pointer */
	bl	start_secondary
	b	.
#endif /* CONFIG_SMP */

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */

#ifdef CONFIG_FSL_BOOKE

	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#elif defined(CONFIG_44x)

	/* Save our parameters */
	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	setup_map_47x
#endif /* CONFIG_PPC_47x */

/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS)
 *    and jump to it.  Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in the original TS.
 * 4) Jump to the 1:1 mapping in the original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 */

	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3
	mfmsr	r4
	andi.	r4,r4,MSR_IS@l
	beq	wmmucr
	oris	r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
	mtspr	SPRN_MMUCR,r3
	sync

	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bl	0f				/* Find our address */
0:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skip				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skip:
	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync

	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	bne	tlb_4k
	rotlwi	r10, r10, 28		/* r10 = 256M */
	b	write_out
tlb_4k:
	cmpwi	r11, PPC44x_TLB_4K
	bne	default
	rotlwi	r10, r10, 12		/* r10 = 4K */
	b	write_out
default:
	rotlwi	r10, r10, 10		/* r10 = 1K */

write_out:
	/*
	 * Write out the tmp 1:1 mapping for this code in other address space
	 * Fixup  EPN = RPN , TS=other address space
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi

2:
	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	li	r5, 0
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1		/* Revert back to Original TS */

	li	r8, 0			/* PageNumber */
	li	r6, 3			/* TLB Index, start at 3 */

next_tlb:
	rotlwi	r3, r8, 28		/* Create EPN (bits 0-3) */
	mr	r4, r3			/* RPN = EPN */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23		/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1		/* Increment PN */
	addi	r6, r6, 1		/* Increment TLB Index */
	cmpwi	r8, 8			/* Are we done? */
	bne	next_tlb
	isync

	/* Jump to the new mapping 1:1 */
	li	r9,0
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	and	r8, r8, r11		/* Get our offset within page */
	addi	r8, r8, (2f-1b)

	and	r5, r25, r10		/* Get our target PageNum */
	or	r8, r8, r5		/* Target jump address */

	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi
2:
	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	sync
	b	ppc44x_map_done

#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

setup_map_47x:

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3		/* Set PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4, r4, MSR_IS@l	/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	2f
2:	mflr	r23
	tlbsx	r23, 0, r23
	tlbre	r24, r23, 0		/* TLB Word 0 */
	tlbre	r25, r23, 1		/* TLB Word 1 */
	tlbre	r26, r23, 2		/* TLB Word 2 */

	/*
	 * Invalidates all the tlb entries by writing to 256 RPNs(r4)
	 * of 4k page size in all 4 ways (0-3 in r3).
	 * This would invalidate the entire UTLB including the one we are
	 * running from. However the shadow TLB entries would help us
	 * to continue the execution, until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000		/* specify the way */
	addi	r4, 0, 0		/* TLB Word0 = (EPN=0, VALID = 0) */
	addi	r5, 0, 0
	b	clear_utlb_entry

	/* Align the loop to speed things up. from head_44x.S */
	.align	6

clear_utlb_entry:

	tlbwe	r4, r3, 0
	tlbwe	r5, r3, 1
	tlbwe	r5, r3, 2
	addis	r3, r3, 0x2000		/* Increment the way */
	cmpwi	r3, 0
	bne	clear_utlb_entry
	addis	r3, 0, 0x8000
	addis	r4, r4, 0x100		/* Increment the EPN */
	cmpwi	r4, 0
	bne	clear_utlb_entry

	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31	/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1		/* r7 = !TS */

	insrwi	r24, r7, 1, 21		/* Change the TS in the saved TLB word 0 */

	/*
	 * write out the TLB entries for the tmp mapping
	 * Use way '0' so that we could easily invalidate it later.
	 */
	lis	r3, 0x8000		/* Way '0' */

	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Update the msr to the new TS */
	insrwi	r5, r7, 1, 26

	bl	1f
1:	mflr	r6
	addi	r6, r6, (2f-1b)

	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r5
	rfi

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
2:
	li	r3, 0
	li	r4, 0			/* TLB Word 0 */
	li	r5, 0			/* TLB Word 1 */
	li	r6, 0
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */

	li	r8, 0			/* PageIndex */

	xori	r7, r7, 1		/* revert back to original TS */

write_utlb:
	rotlwi	r5, r8, 28		/* RPN = PageIndex * 256M */
					/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5			/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21		/* Insert the TS to Word 0 */

	tlbwe	r4, r3, 0		/* Write out the entries */
	tlbwe	r5, r3, 1
	tlbwe	r6, r3, 2
	addi	r8, r8, 1
	cmpwi	r8, 8			/* Have we completed? */
	bne	write_utlb

	/* make sure we complete the TLB write up */
	isync

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    to jump to.
	 */
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	bne	0f
	li	r10, 0x1000		/* r10 = 4k */
	bl	1f

0:
	/* Defaults to 256M */
	lis	r10, 0x1000

	bl	1f
1:	mflr	r4
	addi	r4, r4, (2f-1b)		/* virtual address of 2f */

	subi	r11, r10, 1		/* offsetmask = Pagesize - 1 */
	not	r10, r11		/* Pagemask = ~(offsetmask) */

	and	r5, r25, r10		/* Physical page */
	and	r6, r4, r11		/* offset within the current page */

	or	r5, r5, r6		/* Physical address for 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26

	mtspr	SPRN_SRR1, r8
	mtspr	SPRN_SRR0, r5
	rfi

2:
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000		/* Way '0' */

	clrrwi	r24, r24, 12		/* Clear the valid bit */
	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Make sure we complete the TLB write and flush the shadow TLB */
	isync

#endif

ppc44x_map_done:

	/* Restore the parameters */
	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#else
	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
#endif
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
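	/*
	 * Walk the kexec indirection list: each word in r0 carries an
	 * IND_* flag in its low bits (destination/indirection/done/source)
	 * and a page address in its upper bits.  Source pages are copied
	 * to the current destination page and XOR-checksummed into r6.
	 */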
	li	r6, 0		/* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	mfspr	r3, SPRN_PIR		/* current core we are running on */
	mr	r4, r5			/* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif