sh: Support explicit L1 cache disabling.

This reworks the cache mode configuration in Kconfig and allows
explicit selection of write-back, write-through, and off configurations.
All of the cache flushing routines are optimized away for the off
case.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
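
The "optimized away" part happens on the header side rather than in the
hunks below: with CONFIG_CACHE_OFF set, the cache flushing interfaces can
simply be defined as no-ops so the compiler drops the calls entirely. The
include/asm-sh/cacheflush.h change is not shown in this diff, so the
following is only a sketch of the idea (macro list illustrative):

  /* Sketch: no-op cache maintenance when the L1 cache is disabled. */
  #ifdef CONFIG_CACHE_OFF
  #define flush_cache_all()			do { } while (0)
  #define flush_cache_mm(mm)			do { } while (0)
  #define flush_cache_range(vma, start, end)	do { } while (0)
  #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
  #define flush_dcache_page(page)		do { } while (0)
  #define flush_icache_range(start, end)	do { } while (0)
  #define __flush_wback_region(start, size)	do { } while (0)
  #define __flush_purge_region(start, size)	do { } while (0)
  #define __flush_invalidate_region(start, size)	do { } while (0)
  #else
  #include <asm/cpu/cacheflush.h>	/* normal per-CPU implementations */
  #endif
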
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 093d491..c277755 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -2,7 +2,6 @@
 # Processor families
 #
 config CPU_SH2
-	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
 
 config CPU_SH2A
@@ -414,8 +413,17 @@
 	  Turn this option off for platforms that do not have a direct-mapped
 	  cache, and you have no need to run the caches in such a configuration.
 
-config SH_WRITETHROUGH
-	bool "Use write-through caching"
+choice
+	prompt "Cache mode"
+	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
+	default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
+
+config CACHE_WRITEBACK
+	bool "Write-back"
+	depends on CPU_SH2A || CPU_SH3 || CPU_SH4
+
+config CACHE_WRITETHROUGH
+	bool "Write-through"
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
@@ -426,4 +434,9 @@
 
 	  If unsure, say N.
 
+config CACHE_OFF
+	bool "Off"
+
+endchoice
+
 endmenu
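
Since exactly one of CACHE_WRITEBACK, CACHE_WRITETHROUGH and CACHE_OFF is
selected by the choice above, the CCR programming at CPU init time can
branch on the mode directly. Roughly along these lines, assuming the SH-4
CCR bit names from asm/cpu-sh4/cache.h (the corresponding cache_init()
change is not shown here):

	unsigned long ccr = ctrl_inl(CCR);

#if defined(CONFIG_CACHE_WRITETHROUGH)
	ccr |= CCR_CACHE_ENABLE | CCR_CACHE_WT;	/* caches on, write-through */
#elif defined(CONFIG_CACHE_WRITEBACK)
	ccr |= CCR_CACHE_ENABLE | CCR_CACHE_CB;	/* caches on, copy-back */
#else
	ccr &= ~CCR_CACHE_ENABLE;		/* CACHE_OFF: caches stay disabled */
#endif

	/* CCR must be written while running from an uncached (P2) mapping. */
	ctrl_outl(ccr, CCR);
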
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 4061e89..e73d7f9 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -4,9 +4,10 @@
 
 obj-y			:= init.o extable.o consistent.o
 
-obj-$(CONFIG_CPU_SH2)	+= cache-sh2.o
-obj-$(CONFIG_CPU_SH3)	+= cache-sh3.o
-obj-$(CONFIG_CPU_SH4)	+= cache-sh4.o
+cache-$(CONFIG_CPU_SH2)		:= cache-sh2.o
+cache-$(CONFIG_CPU_SH3)		:= cache-sh3.o
+cache-$(CONFIG_CPU_SH4)		:= cache-sh4.o pg-sh4.o
+cache-$(CONFIG_CACHE_OFF)	:=
 
 mmu-y			:= tlb-nommu.o pg-nommu.o
 mmu-$(CONFIG_CPU_SH3)	+= fault-nommu.o
@@ -14,7 +15,7 @@
 mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o	\
 			   ioremap.o
 
-obj-y			+= $(mmu-y)
+obj-y			+= $(cache-y) $(mmu-y)
 
 ifdef CONFIG_DEBUG_FS
 obj-$(CONFIG_CPU_SH4)		+= cache-debugfs.o
@@ -22,7 +23,7 @@
 
 ifdef CONFIG_MMU
 obj-$(CONFIG_CPU_SH3)		+= tlb-sh3.o
-obj-$(CONFIG_CPU_SH4)		+= tlb-sh4.o pg-sh4.o
+obj-$(CONFIG_CPU_SH4)		+= tlb-sh4.o
 obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o
 endif
 
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index a08a4a9..7d43758 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -145,7 +145,7 @@
 
 	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
 
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
 	/*
 	 * When we are in 32-bit address extended mode, CCR.CB becomes
 	 * invalid, so care must be taken to manually adjust cacheable
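
(The body guarded by this #ifdef falls outside the hunk context shown
above; conceptually it forces the write-through bit on cacheable PMB data
entries by hand, something along the lines of the sketch below, where
ppn/flags/pos are assumed to be the locals already used by this function.)

#ifdef CONFIG_CACHE_WRITETHROUGH
	/* CCR.CB does not apply to PMB mappings in 32-bit mode, so mark
	 * cacheable entries write-through explicitly. */
	if (flags & PMB_C)
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
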
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index f74cf66..13fde8c 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -34,22 +34,27 @@
 	unsigned long flags;
 	unsigned long pteval;
 	unsigned long vpn;
-	struct page *page;
-	unsigned long pfn;
 
 	/* Ptrace may call this routine. */
 	if (vma && current->active_mm != vma->vm_mm)
 		return;
 
-	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
+#ifndef CONFIG_CACHE_OFF
+	{
+		unsigned long pfn = pte_pfn(pte);
+
+		if (pfn_valid(pfn)) {
+			struct page *page = pfn_to_page(pfn);
+
+			if (!test_bit(PG_mapped, &page->flags)) {
+				unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+				__flush_wback_region((void *)P1SEGADDR(phys),
+						     PAGE_SIZE);
+				__set_bit(PG_mapped, &page->flags);
+			}
 		}
 	}
+#endif
 
 	local_irq_save(flags);
 
@@ -66,7 +71,7 @@
 
 	/* Set PTEL register */
 	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
 	pteval |= _PAGE_WT;
 #endif
 	/* conveniently, we want all the software flags to be 0 anyway */