#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#include <linux/mm.h>

#ifdef __KERNEL__

#ifdef CONFIG_CACHE_OFF
/*
 * Nothing to do when the cache is disabled; the initial flush and explicit
 * disabling are handled at CPU init time.
 *
 * See arch/sh/kernel/cpu/init.c:cache_init().
 */
#define p3_cache_init()				do { } while (0)
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_sigtramp(vaddr)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
#define __flush_wback_region(start, size)	do { (void)(start); } while (0)
#define __flush_purge_region(start, size)	do { (void)(start); } while (0)
#define __flush_invalidate_region(start, size)	do { (void)(start); } while (0)
#else
#include <cpu/cacheflush.h>

/*
 * Consistent DMA requires that the __flush_xxx() primitives be set up for
 * any of the enabled non-coherent caches (most of the UP CPUs), regardless
 * of PIPT or VIPT cache configuration.
 */

/* Flush (write-back only) a region (smaller than a page) */
extern void __flush_wback_region(void *start, int size);
/* Flush (write-back & invalidate) a region (smaller than a page) */
extern void __flush_purge_region(void *start, int size);
/* Flush (invalidate only) a region (smaller than a page) */
extern void __flush_invalidate_region(void *start, int size);
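
/*
 * Illustrative sketch (an assumption about typical use, not taken from this
 * file): with a non-coherent D-cache, a driver would write dirty lines back
 * before handing a buffer to a device, and invalidate stale lines before
 * reading data the device has written.  "buf" and "len" below are
 * hypothetical:
 *
 *	memset(buf, 0, len);
 *	__flush_wback_region(buf, len);		(CPU stores reach memory before DMA)
 *	... device writes into buf ...
 *	__flush_invalidate_region(buf, len);	(discard stale cached copies)
 */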
#endif

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);

static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

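/*
 * Kernel-side D-cache maintenance for a page goes through the regular
 * flush_dcache_page() path on sh, so flush_kernel_dcache_page() below
 * simply defers to it.
 */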
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_dcache_page(page);
}

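/*
 * Cache-aware copies between a kernel buffer and a user-mapped page; used
 * by generic code such as access_process_vm() so that writes to another
 * context's pages stay coherent with the I-cache and any D-cache aliases.
 */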
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void);
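
/*
 * A sketch of typical use (an illustration, not taken from this file):
 * kmap_coherent() maps "page" at a kernel address whose cache colour
 * matches the user address "vaddr", so the page can be touched without
 * creating a D-cache alias; kunmap_coherent() tears the mapping down.
 * "vto", "src" and "len" are hypothetical:
 *
 *	vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 */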

#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */