[XTENSA] Add support for cache-aliasing

Add support for processors that have cache-aliasing issues, such as
the Stretch S5000 processor. Cache aliasing means that the size of
one cache way is larger than the page size, so a page can end up in
several places in the cache depending on the virtual-to-physical
translation. The method used here is to map a user page temporarily
through auto-refill way 0 of the DTLB. We will probably want to
revisit this issue later and use a better approach with kmap/kunmap.
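
Purely as illustration (not part of the patch): two mappings of the
same physical page alias in the cache whenever their virtual
addresses differ in the index bits above the page offset. A minimal
sketch of that condition in C, assuming a hypothetical
DCACHE_ALIAS_MASK macro covering exactly those bits (DCACHE_WAY_SIZE
and PAGE_SIZE are the macros used below in cacheflush.h):

	/* Illustration only: the bits of a virtual address that select
	 * the cache set but lie above the page offset. */
	#define DCACHE_ALIAS_MASK  ((DCACHE_WAY_SIZE - 1) & ~(PAGE_SIZE - 1))

	/* Non-zero when two virtual mappings of the same physical page
	 * land in different cache sets, i.e. when they alias. */
	static inline unsigned long cache_alias_differs(unsigned long v1,
							unsigned long v2)
	{
		return (v1 ^ v2) & DCACHE_ALIAS_MASK;
	}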

Signed-off-by: Chris Zankel <chris@zankel.net>
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 22ef901..b773c57 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * (C) 2001 - 2006 Tensilica Inc.
+ * (C) 2001 - 2007 Tensilica Inc.
  */
 
 #ifndef _XTENSA_CACHEFLUSH_H
@@ -18,10 +18,7 @@
 #include <asm/page.h>
 
 /*
- * flush and invalidate data cache, invalidate instruction cache:
- *
- * __flush_invalidate_cache_all()
- * __flush_invalidate_cache_range(from,sze)
+ * Low-level routines for cache flushing.
  *
  * invalidate data or instruction cache:
  *
@@ -40,26 +37,39 @@
  * __flush_invalidate_dcache_all()
  * __flush_invalidate_dcache_page(adr)
  * __flush_invalidate_dcache_range(from,size)
+ *
+ * specials for cache aliasing:
+ *
+ * __flush_invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_icache_page_alias(vaddr,paddr)
  */
 
-extern void __flush_invalidate_cache_all(void);
-extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
-extern void __flush_invalidate_dcache_all(void);
+extern void __invalidate_dcache_all(void);
 extern void __invalidate_icache_all(void);
-
 extern void __invalidate_dcache_page(unsigned long);
 extern void __invalidate_icache_page(unsigned long);
 extern void __invalidate_icache_range(unsigned long, unsigned long);
 extern void __invalidate_dcache_range(unsigned long, unsigned long);
 
+
 #if XCHAL_DCACHE_IS_WRITEBACK
+extern void __flush_invalidate_dcache_all(void);
 extern void __flush_dcache_page(unsigned long);
+extern void __flush_dcache_range(unsigned long, unsigned long);
 extern void __flush_invalidate_dcache_page(unsigned long);
 extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
 #else
-# define __flush_dcache_page(p)				do { } while(0)
-# define __flush_invalidate_dcache_page(p) 		do { } while(0)
-# define __flush_invalidate_dcache_range(p,s)		do { } while(0)
+# define __flush_dcache_range(p,s)		do { } while (0)
+# define __flush_dcache_page(p)			do { } while (0)
+# define __flush_invalidate_dcache_page(p) 	__invalidate_dcache_page(p)
+# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+#endif
+#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
 #endif
 
 /*
@@ -71,17 +81,21 @@
  * (see also Documentation/cachetlb.txt)
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-#define flush_cache_all()		__flush_invalidate_cache_all();
-#define flush_cache_mm(mm)		__flush_invalidate_cache_all();
-#define flush_cache_dup_mm(mm)		__flush_invalidate_cache_all();
+#define flush_cache_all()						\
+	do {								\
+		__flush_invalidate_dcache_all();			\
+		__invalidate_icache_all();				\
+	} while (0)
 
-#define flush_cache_vmap(start,end)	__flush_invalidate_cache_all();
-#define flush_cache_vunmap(start,end)	__flush_invalidate_cache_all();
+#define flush_cache_mm(mm)		flush_cache_all()
+#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
+
+#define flush_cache_vmap(start,end)	flush_cache_all()
+#define flush_cache_vunmap(start,end)	flush_cache_all()
 
 extern void flush_dcache_page(struct page*);
-
 extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
 extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
 
@@ -101,24 +115,39 @@
 
 #endif
 
+/* Ensure consistency between data and instruction cache. */
 #define flush_icache_range(start,end) 					\
-	__invalidate_icache_range(start,(end)-(start))
+	do {								\
+		__flush_dcache_range(start, (end) - (start));		\
+		__invalidate_icache_range(start,(end) - (start));	\
+	} while (0)
 
 /* This is not required, see Documentation/cachetlb.txt */
-
-#define	flush_icache_page(vma,page)			do { } while(0)
+#define	flush_icache_page(vma,page)			do { } while (0)
 
 #define flush_dcache_mmap_lock(mapping)			do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)		do { } while (0)
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+extern void copy_to_user_page(struct vm_area_struct*, struct page*,
+		unsigned long, void*, const void*, unsigned long);
+extern void copy_from_user_page(struct vm_area_struct*, struct page*,
+		unsigned long, void*, const void*, unsigned long);
+
+#else
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
+	do {								\
+		memcpy(dst, src, len);					\
+		__flush_dcache_range((unsigned long) dst, len);		\
+		__invalidate_icache_range((unsigned long) dst, len);	\
+	} while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
+#endif
+
 #endif /* __KERNEL__ */
-
 #endif /* _XTENSA_CACHEFLUSH_H */
-
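
A note on intended usage of the new alias interfaces (illustration
only, not part of the patch): __flush_invalidate_dcache_page_alias()
and __invalidate_icache_page_alias() are meant to be called with a
temporary virtual address whose cache color matches the user mapping
of the page. A minimal sketch, assuming a TLBTEMP_BASE_1 scratch
window and the DCACHE_ALIAS_MASK macro sketched above (neither is
defined in this header):

	/* Sketch only: flush a physical page through a temporary
	 * mapping that has the same cache color as the user-space
	 * virtual address 'vaddr'. */
	static void flush_user_page(unsigned long vaddr, unsigned long phys)
	{
		unsigned long tmp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);
	}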