sh: Preparation for uncached jumps through PMB.

Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This patch provides the basic
infrastructure to maintain that behaviour on 32-bit physical parts
that don't map P1/P2 at all, using a shiny new linker section and a
corresponding fixmap entry: jump_to_P2()/back_to_P1() become
jump_to_uncached()/back_to_cached(), and the functions that use them
are tagged __uses_jump_to_uncached so they can be placed in that
section.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
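
For reference, a minimal sketch of how the cached-to-uncached jump can
work on a 29-bit part, where P1 (0x80000000, cached) and P2
(0xA0000000, uncached) are 1:1 aliases of the same physical memory, so
offsetting the program counter by 0x20000000 is enough to continue
executing uncached. This is illustration only under those assumptions
(the macro and constant names below are made up, and the generic
jump_to_uncached() will need a runtime offset to work through a PMB
mapping on 32-bit parts):

#define UNCACHED_OFFSET	0x20000000UL	/* P2SEG - P1SEG, 29-bit mode */

#define jump_to_uncached_sketch()					\
do {									\
	unsigned long __dummy;						\
									\
	__asm__ __volatile__(						\
		"mova	1f, %0\n\t"	/* r0 = cached address of 1: */	\
		"add	%1, %0\n\t"	/* offset into the P2 alias */	\
		"jmp	@%0\n\t"	/* resume execution uncached */	\
		" nop\n\t"						\
		".balign 4\n"						\
		"1:"							\
		: "=&z" (__dummy)	/* "z" = r0, which mova needs */\
		: "r" (UNCACHED_OFFSET)					\
		: "memory");						\
} while (0)

The matching back_to_cached() only needs to jump to the link-time
(cached) address of a label following the jump; as the comment in the
flush_icache_all() hunk below notes, back_to_cached() also takes care
of the barrier, so callers must not add another one.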
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 226b190..43d7ff6b 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -190,7 +190,7 @@
  * .. which happens to be the same behavior as flush_icache_range().
  * So, we simply flush out a line.
  */
-void flush_cache_sigtramp(unsigned long addr)
+void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
 {
 	unsigned long v, index;
 	unsigned long flags;
@@ -205,13 +205,13 @@
 			(v & boot_cpu_data.icache.entry_mask);
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	for (i = 0; i < boot_cpu_data.icache.ways;
 	     i++, index += boot_cpu_data.icache.way_incr)
 		ctrl_outl(0, index);	/* Clear out Valid-bit */
 
-	back_to_P1();
+	back_to_cached();
 	wmb();
 	local_irq_restore(flags);
 }
@@ -256,12 +256,12 @@
 }
 
 /* TODO: Selective icache invalidation through IC address array.. */
-static inline void flush_icache_all(void)
+static inline void __uses_jump_to_uncached flush_icache_all(void)
 {
 	unsigned long flags, ccr;
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	/* Flush I-cache */
 	ccr = ctrl_inl(CCR);
@@ -269,11 +269,11 @@
 	ctrl_outl(ccr, CCR);
 
 	/*
-	 * back_to_P1() will take care of the barrier for us, don't add
+	 * back_to_cached() will take care of the barrier for us, don't add
 	 * another one!
 	 */
 
-	back_to_P1();
+	back_to_cached();
 	local_irq_restore(flags);
 }
 
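
For context on the annotation itself, here is one plausible shape for
__uses_jump_to_uncached, assuming the shiny new linker section is
named ".uncached.text" and that 32-bit physical mode is gated by
CONFIG_32BIT (both names are assumptions of this sketch, not taken
from the hunks above):

/* Sketch only: the section name and config symbol are assumed. */
#ifdef CONFIG_32BIT
/* Collect tagged functions where the fixmap entry can map them uncached. */
#define __uses_jump_to_uncached \
	__attribute__ ((__section__(".uncached.text")))
#else
/* 29-bit parts already have the uncached P2 alias of the normal text. */
#define __uses_jump_to_uncached
#endif

Either way, the annotation documents at the definition that the
function's text must be safe to execute from the uncached mapping.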