sh: Add kmap_coherent()/kunmap_coherent() interface for SH-4.

This wires up kmap_coherent() and kunmap_coherent() on SH-4, and
moves away from the p3map_mutex and reserved P3 space, opting to
use fixmaps for colouring instead.

The copy_user_page()/clear_user_page() implementations are moved
over to this interface, which fixes the nasty blowups seen with
spinlock debugging as a result of having some of these calls
nested under the page table lock.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index df69da9..82b48e6 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/pg-sh4.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2005  Paul Mundt
+ * Copyright (C) 2002 - 2007  Paul Mundt
  *
  * Released under the terms of the GNU GPL v2.0.
  */
@@ -11,10 +11,35 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-extern struct mutex p3map_mutex[];
-
 #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
 
+static inline void *kmap_coherent(struct page *page, unsigned long addr)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr, flags;
+	pte_t pte;
+
+	inc_preempt_count();
+
+	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
+	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+	pte = mk_pte(page, PAGE_KERNEL);
+
+	local_irq_save(flags);
+	flush_tlb_one(get_asid(), vaddr);
+	local_irq_restore(flags);
+
+	update_mmu_cache(NULL, vaddr, pte);
+
+	return (void *)vaddr;
+}
+
+static inline void kunmap_coherent(struct page *page)
+{
+	dec_preempt_count();
+	preempt_check_resched();
+}
+
 /*
  * clear_user_page
  * @to: P1 address
@@ -27,25 +52,9 @@
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
-		unsigned long phys_addr = PHYSADDR(to);
-		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
-		pgd_t *pgd = pgd_offset_k(p3_addr);
-		pud_t *pud = pud_offset(pgd, p3_addr);
-		pmd_t *pmd = pmd_offset(pud, p3_addr);
-		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
-		pte_t entry;
-		unsigned long flags;
-
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
-		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
-		set_pte(pte, entry);
-		local_irq_save(flags);
-		flush_tlb_one(get_asid(), p3_addr);
-		local_irq_restore(flags);
-		update_mmu_cache(NULL, p3_addr, entry);
-		__clear_user_page((void *)p3_addr, to);
-		pte_clear(&init_mm, p3_addr, pte);
-		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
+		void *vto = kmap_coherent(page, address);
+		__clear_user_page(vto, to);
+		kunmap_coherent(vto);
 	}
 }
 
@@ -63,25 +72,9 @@
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
-		unsigned long phys_addr = PHYSADDR(to);
-		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
-		pgd_t *pgd = pgd_offset_k(p3_addr);
-		pud_t *pud = pud_offset(pgd, p3_addr);
-		pmd_t *pmd = pmd_offset(pud, p3_addr);
-		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
-		pte_t entry;
-		unsigned long flags;
-
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
-		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
-		set_pte(pte, entry);
-		local_irq_save(flags);
-		flush_tlb_one(get_asid(), p3_addr);
-		local_irq_restore(flags);
-		update_mmu_cache(NULL, p3_addr, entry);
-		__copy_user_page((void *)p3_addr, from, to);
-		pte_clear(&init_mm, p3_addr, pte);
-		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
+		void *vfrom = kmap_coherent(page, address);
+		__copy_user_page(vfrom, from, to);
+		kunmap_coherent(vfrom);
 	}
 }