[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET

This patch separates the usage of KERNELBASE and PAGE_OFFSET. I haven't
looked at any of the PPC32 code; if we ever want to support Kdump on
PPC we'll have to do another audit, ditto for iSeries.

This patch makes PAGE_OFFSET the constant: it'll always be
0xC000000000000000 (0xC followed by a gazillion zeroes) for 64-bit.
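
Concretely, the 64-bit definitions end up looking something like this
(a sketch of the page.h constants, not a verbatim quote; the two values
are still numerically equal after this patch, the point is that they
are now allowed to diverge):

	#define PAGE_OFFSET	ASM_CONST(0xC000000000000000)
	#define KERNELBASE	PAGE_OFFSET	/* equal today, not forever */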

To get a physical address from a virtual one you subtract PAGE_OFFSET,
_not_ KERNELBASE.
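
In other words, the address translation macros are defined purely in
terms of PAGE_OFFSET. Roughly (a sketch of the usual 64-bit
__pa()/__va() definitions):

	#define __pa(x)	((unsigned long)(x) - PAGE_OFFSET)
	#define __va(x)	((void *)((unsigned long)(x) + PAGE_OFFSET))

This is why the machine_kexec_64.c hunk below can switch from
open-coded "+ KERNELBASE" arithmetic to __va().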

KERNELBASE is the virtual address of the start of the kernel; it's
often the same as PAGE_OFFSET, but it _might not be_.

If you want to know something's offset from the start of the kernel,
you should subtract KERNELBASE.
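
To make the distinction concrete, here are two hypothetical helpers
(illustration only, assuming the usual page.h definitions are in
scope):

	/* offset of a kernel symbol within the kernel image */
	static unsigned long kernel_offset(void *p)
	{
		return (unsigned long)p - KERNELBASE;
	}

	/* physical address of the same symbol, i.e. __pa(p) */
	static unsigned long symbol_phys(void *p)
	{
		return (unsigned long)p - PAGE_OFFSET;
	}

The two only agree while KERNELBASE == PAGE_OFFSET, which is exactly
the assumption this patch stops relying on.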

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 893dd24..5de0d80 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -60,7 +60,7 @@
  *
  * The display is mapped to virtual address 0xD0000000, rather
  * than 1:1, because some some CHRP machines put the frame buffer
- * in the region starting at 0xC0000000 (KERNELBASE).
+ * in the region starting at 0xC0000000 (PAGE_OFFSET).
  * This mapping is temporary and will disappear as soon as the
  * setup done by MMU_Init() is applied.
  *
@@ -71,7 +71,7 @@
  */
 void __init btext_prepare_BAT(void)
 {
-	unsigned long vaddr = KERNELBASE + 0x10000000;
+	unsigned long vaddr = PAGE_OFFSET + 0x10000000;
 	unsigned long addr;
 	unsigned long lowbits;
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 7b93971..aacebb3 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -690,7 +690,7 @@
 
 	/* Setup our real return addr */	
 	SET_REG_TO_LABEL(r4,.rtas_return_loc)
-	SET_REG_TO_CONST(r9,KERNELBASE)
+	SET_REG_TO_CONST(r9,PAGE_OFFSET)
 	sub	r4,r4,r9
        	mtlr	r4
 
@@ -718,7 +718,7 @@
 _STATIC(rtas_return_loc)
 	/* relocation is off at this point */
 	mfspr	r4,SPRN_SPRG3	        /* Get PACA */
-	SET_REG_TO_CONST(r5, KERNELBASE)
+	SET_REG_TO_CONST(r5, PAGE_OFFSET)
         sub     r4,r4,r5                /* RELOC the PACA base pointer */
 
 	mfmsr   r6
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index 5a05a79..8a53d43 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -16,8 +16,8 @@
 	.xSegmentTableOffs = STAB0_PAGE,
 
 	.xEsids = {
-		{ .xKernelEsid = GET_ESID(KERNELBASE),
-		  .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
+		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
 		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
 		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
 	},
@@ -25,7 +25,7 @@
 	.xRanges = {
 		{ .xPages = HvPagesToMap,
 		  .xOffset = 0,
-		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
+		  .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
 		},
 	},
 };
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index ec0f06b..0b0fa47 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -153,9 +153,8 @@
 	 * including ones that were in place on the original copy
 	 */
 	for (i = 0; i < nr_segments; i++)
-		flush_icache_range(ranges[i].mem + KERNELBASE,
-				ranges[i].mem + KERNELBASE +
-				ranges[i].memsz);
+		flush_icache_range((unsigned long)__va(ranges[i].mem),
+			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
 }
 
 #ifdef CONFIG_SMP