blob: 32299b7c2b4879a90184cae76a4756d11b559283 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __ASM_SH_CACHEFLUSH_H
2#define __ASM_SH_CACHEFLUSH_H
Paul Mundte7bd34a2007-07-31 17:07:28 +09003
Linus Torvalds1da177e2005-04-16 15:20:36 -07004#ifdef __KERNEL__
5
Paul Mundt37443ef2009-08-15 12:29:49 +09006#include <linux/mm.h>
Paul Mundtf9bd71f2009-08-21 16:20:57 +09007
8/*
9 * Cache flushing:
10 *
11 * - flush_cache_all() flushes entire cache
12 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
13 * - flush_cache_dup mm(mm) handles cache flushing when forking
14 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
15 * - flush_cache_range(vma, start, end) flushes a range of pages
16 *
17 * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
18 * - flush_icache_range(start, end) flushes(invalidates) a range for icache
19 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
20 * - flush_cache_sigtramp(vaddr) flushes the signal trampoline
21 */
22extern void (*flush_cache_all)(void);
23extern void (*flush_cache_mm)(struct mm_struct *mm);
24extern void (*flush_cache_dup_mm)(struct mm_struct *mm);
25extern void (*flush_cache_page)(struct vm_area_struct *vma,
26 unsigned long addr, unsigned long pfn);
27extern void (*flush_cache_range)(struct vm_area_struct *vma,
28 unsigned long start, unsigned long end);
29extern void (*flush_dcache_page)(struct page *page);
30extern void (*flush_icache_range)(unsigned long start, unsigned long end);
31extern void (*flush_icache_page)(struct vm_area_struct *vma,
32 struct page *page);
33extern void (*flush_cache_sigtramp)(unsigned long address);
34
35extern void (*__flush_wback_region)(void *start, int size);
36extern void (*__flush_purge_region)(void *start, int size);
37extern void (*__flush_invalidate_region)(void *start, int size);
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Paul Mundtc0fe4782009-08-04 16:02:43 +090039#define ARCH_HAS_FLUSH_ANON_PAGE
40extern void __flush_anon_page(struct page *page, unsigned long);
41
42static inline void flush_anon_page(struct vm_area_struct *vma,
43 struct page *page, unsigned long vmaddr)
44{
45 if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
46 __flush_anon_page(page, vmaddr);
47}

/* Tell core mm that this arch provides flush_kernel_dcache_page(). */
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/*
 * Kernel-created mappings get the same treatment as user mappings here:
 * defer to the runtime-selected flush_dcache_page() implementation.
 */
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_dcache_page(page);
}
54
Paul Mundtba1789e2007-11-05 16:18:16 +090055extern void copy_to_user_page(struct vm_area_struct *vma,
56 struct page *page, unsigned long vaddr, void *dst, const void *src,
57 unsigned long len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070058
Paul Mundtba1789e2007-11-05 16:18:16 +090059extern void copy_from_user_page(struct vm_area_struct *vma,
60 struct page *page, unsigned long vaddr, void *dst, const void *src,
61 unsigned long len);
Paul Mundtba1789e2007-11-05 16:18:16 +090062
63#define flush_cache_vmap(start, end) flush_cache_all()
64#define flush_cache_vunmap(start, end) flush_cache_all()
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
Paul Mundt7fbb2d32009-08-15 11:25:32 +090066#define flush_dcache_mmap_lock(mapping) do { } while (0)
67#define flush_dcache_mmap_unlock(mapping) do { } while (0)
68
Paul Mundt27397422009-08-15 09:19:19 +090069void kmap_coherent_init(void);
70void *kmap_coherent(struct page *page, unsigned long addr);
71void kunmap_coherent(void);
72
Paul Mundtdde5e3f2009-08-15 09:49:32 +090073#define PG_dcache_dirty PG_arch_1
74
Paul Mundtecba1062009-08-15 11:05:42 +090075void cpu_cache_init(void);
76
Linus Torvalds1da177e2005-04-16 15:20:36 -070077#endif /* __KERNEL__ */
78#endif /* __ASM_SH_CACHEFLUSH_H */