MN10300: Rename __flush_tlb*() to local_flush_tlb*()

Rename __flush_tlb*() to local_flush_tlb*() as this is more appropriate, and to
be ready to differentiate local from global TLB flushes when SMP is introduced.

Whilst we're at it, get rid of __flush_tlb_global() and make
local_flush_tlb_page() take an mm_struct pointer rather than a VMA pointer.
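
As a rough sketch of the new calling convention (the caller below is
illustrative only and not part of this patch):

	#include <linux/mm_types.h>
	#include <asm/tlbflush.h>

	static void example_invalidate(struct vm_area_struct *vma,
				       unsigned long addr)
	{
		/*
		 * flush_tlb_page(vma, addr) now expands to this: code that
		 * wants a purely local flush passes the mm, not the VMA.
		 */
		local_flush_tlb_page(vma->vm_mm, addr);
	}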

Signed-off-by: David Howells <dhowells@redhat.com>
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e292..5d54bf5 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -13,21 +13,37 @@
 
 #include <asm/processor.h>
 
-#define __flush_tlb()						\
-do {								\
-	int w;							\
-	__asm__ __volatile__					\
-		("	mov %1,%0		\n"		\
-		 "	or %2,%0		\n"		\
-		 "	mov %0,%1		\n"		\
-		 : "=d"(w)					\
-		 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)	\
-		 : "cc", "memory"				\
-		 );						\
-} while (0)
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+	int w;
+	asm volatile(
+		"	mov	%1,%0		\n"
+		"	or	%2,%0		\n"
+		"	mov	%0,%1		\n"
+		: "=d"(w)
+		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+		: "cc", "memory");
+}
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+#define local_flush_tlb_all()		local_flush_tlb()
+
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+#define local_flush_tlb_one(addr)	local_flush_tlb()
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);
 
 
 /*
@@ -43,14 +59,14 @@
 #define flush_tlb_all()				\
 do {						\
 	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
 	preempt_enable();			\
 } while (0)
 
 #define flush_tlb_mm(mm)			\
 do {						\
 	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
 	preempt_enable();			\
 } while (0)
 
@@ -59,13 +75,13 @@
 	unsigned long __s __attribute__((unused)) = (start);	\
 	unsigned long __e __attribute__((unused)) = (end);	\
 	preempt_disable();					\
-	__flush_tlb_all();					\
+	local_flush_tlb_all();					\
 	preempt_enable();					\
 } while (0)
 
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()			flush_tlb_all()
 
-#define __flush_tlb_global()			flush_tlb_all()
-#define flush_tlb()				flush_tlb_all()
 #define flush_tlb_kernel_range(start, end)			\
 do {								\
 	unsigned long __s __attribute__((unused)) = (start);	\
@@ -73,8 +89,6 @@
 	flush_tlb_all();					\
 } while (0)
 
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
 #define flush_tlb_pgtables(mm, start, end)	do {} while (0)
 
 #endif /* _ASM_TLBFLUSH_H */