sh: Preparation for uncached jumps through PMB.

Presently, most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This patch provides the basic
infrastructure for maintaining that behaviour on 32-bit physical
parts that don't map P1/P2 at all, using a shiny new linker section
and a corresponding fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
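
For readers unfamiliar with the new helpers: below is a minimal
sketch of how __uses_jump_to_uncached, jump_to_uncached() and
back_to_cached() might be wired up on SUPERH32, assuming a
.uncached.text linker section and a cached_to_uncached offset
exported by the PMB/mm setup code. Neither the section name nor the
offset variable appears in the hunks that follow, so treat the
details as illustrative rather than as the exact implementation.

/* Sketch only -- names and asm are assumptions from the description. */

#ifdef CONFIG_SUPERH32
/* Force the annotated function into the uncached text section. */
#define __uses_jump_to_uncached \
	__attribute__ ((__section__ (".uncached.text")))
#else
#define __uses_jump_to_uncached
#endif

/*
 * Offset from the cached kernel mapping to its uncached alias,
 * assumed to be provided by the PMB/mm initialisation code.
 */
extern unsigned long cached_to_uncached;

/* Continue executing from the uncached alias of the current code. */
#define jump_to_uncached()					\
do {								\
	unsigned long __dummy;					\
								\
	__asm__ __volatile__(					\
		"mova	1f, %0\n\t"				\
		"add	%1, %0\n\t"				\
		"jmp	@%0\n\t"				\
		" nop\n\t"					\
		"1:"						\
		: "=&z" (__dummy)				\
		: "r" (cached_to_uncached));			\
} while (0)

/*
 * Return to the cached mapping by jumping through a literal that
 * the assembler resolves to the cached (link-time) address.  A real
 * helper would likely also issue a control barrier before the jump.
 */
#define back_to_cached()					\
do {								\
	unsigned long __dummy;					\
								\
	__asm__ __volatile__(					\
		"mov.l	1f, %0\n\t"				\
		"jmp	@%0\n\t"				\
		" nop\n\t"					\
		".balign 4\n"					\
		"1:	.long 2f\n"				\
		"2:"						\
		: "=&r" (__dummy));				\
} while (0)

With something along these lines in place, cache_init() below can be
dropped into the uncached section via the attribute and manipulate
CCR safely from the uncached alias, mirroring what jump_to_P2() and
back_to_P1() did on the 29-bit parts.
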
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index fd1688e6..0f0c76a 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -64,11 +64,11 @@
  * Generic first-level cache init
  */
 #ifdef CONFIG_SUPERH32
-static void __init cache_init(void)
+static void __uses_jump_to_uncached cache_init(void)
 {
 	unsigned long ccr, flags;
 
-	jump_to_P2();
+	jump_to_uncached();
 	ccr = ctrl_inl(CCR);
 
 	/*
@@ -145,7 +145,7 @@
 #endif
 
 	ctrl_outl(flags, CCR);
-	back_to_P1();
+	back_to_cached();
 }
 #else
 #define cache_init()	do { } while (0)