#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));
/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed, so the invalidate below simply repeats the
 * dcache flush (after flushing any backing pages still marked
 * PG_dcache_dirty). */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	void *cursor = vaddr;

	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(cursor);

		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			flush_kernel_dcache_page(page);
	}
	flush_kernel_dcache_range_asm(start, start + size);
}

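/* Mapping or unmapping a vmalloc area creates or destroys kernel aliases
 * of pages that may also be mapped elsewhere, so simply flush everything. */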
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

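/* flush_dcache_page() has to walk every user mapping of a page cache page;
 * these hooks let it hold the mapping's tree_lock (IRQs off) while it does. */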
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

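/* The I and D caches are not coherent: new code has to be written back from
 * the d-cache before the corresponding i-cache lines are flushed, hence the
 * two-step sequence below. */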
#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)

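/* Used when the kernel writes into a user page on the user's behalf (e.g.
 * ptrace inserting a breakpoint): flush the user alias, copy, then push the
 * new bytes out of the kernel d-cache so a later i-cache fetch sees them. */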
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* defined in pacache.S, exported in cache.c, used by flush_anon_page() */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

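/* Anonymous pages have no address_space, so flush_dcache_page() cannot find
 * their user mappings; flush the user alias of the page directly, by
 * physical address, instead. */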
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page)) {
		flush_tlb_page(vma, vmaddr);
		preempt_disable();
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
		preempt_enable();
	}
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 need this */
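/* kmap() never actually highmem-maps anything here (page_address() is always
 * valid); the hooks exist so that kunmap()/kunmap_atomic() can flush the
 * kernel view of the page through kunmap_parisc() and keep the virtually
 * indexed caches coherent with user mappings. */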

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
	kunmap_parisc(page_address(page));
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_parisc(addr);
	pagefault_enable();
}

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif

#endif /* _PARISC_CACHEFLUSH_H */