/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

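/*
 * Per-CPU cache flush primitives. These all default to cache_noop and
 * are pointed at the real implementations by the family-specific
 * cache_init() routines invoked from cpu_cache_init() below.
 */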
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

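	/* smp_call_function() only targets the other CPUs; run the op locally too. */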
	func(info);

	preempt_enable();
}

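/*
 * On parts with D-cache aliasing, writes into a user page that is still
 * mapped and marked PG_dcache_clean go through a kernel mapping of the
 * same cache colour (kmap_coherent()) so no alias is created. Otherwise
 * the kernel copy is written directly and PG_dcache_clean is cleared so
 * the alias gets resolved later.
 */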
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}

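/*
 * Page copy for COW/fork: read the source through a coherent (same colour)
 * mapping when it may still have live user-space cache lines, and purge the
 * destination's kernel mapping if it aliases the eventual user address.
 */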
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

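/*
 * Called from the PTE update path. On aliasing D-caches, purge the kernel
 * mapping of a page the first time it is mapped into user space while
 * still marked dirty (PG_dcache_clean not yet set).
 */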
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

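/*
 * Flush an anonymous page before the kernel reads it through its lowmem
 * mapping, but only when that mapping aliases the user-space address.
 */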
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

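/*
 * The generic cache flushing entry points below simply fan the
 * corresponding local_flush_*() operation out to every online CPU.
 */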
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

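/*
 * Derive the alias mask and the number of cache colours from the probed
 * set count and entry size. A zero mask means a way fits within one page,
 * so no colouring is needed.
 */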
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

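/*
 * Compute the aliasing parameters for each cache, then let the
 * family-specific init code replace the no-op flush hooks with the real
 * implementations. With the caches disabled everything stays a no-op.
 */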
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}