/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

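/*
 * The local_flush_*() ops below default to cache_noop and are pointed
 * at the real implementations by the CPU family specific cache init
 * code (sh2_cache_init(), sh4_cache_init(), etc.) from cpu_cache_init().
 */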
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

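/*
 * The __flush_*_region() ops work on a virtual address range: wback
 * writes dirty lines back, purge writes back and invalidates, and
 * invalidate drops lines without writing them back.
 */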
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

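/*
 * Run a cache operation on every online CPU: IPI the others (when it
 * is safe to do so) and then run the op locally with preemption
 * disabled.
 */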
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
				       int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}

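/*
 * Write into a user page (e.g. for ptrace access). With a virtually
 * indexed D-cache the kernel and user mappings of the page can land in
 * different cache lines, so if the page is mapped and its kernel-side
 * view is clean, write through a kernel mapping of the same cache
 * colour as the user address (kmap_coherent()). Otherwise copy via the
 * kernel mapping and clear PG_dcache_clean so __update_cache() purges
 * the page when it is next mapped.
 */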
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}

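/*
 * Page copy for COW faults. Read the source through a colour-matched
 * kernel mapping when the user view could differ from the kernel one,
 * and purge the destination when it aliases the user address (or the
 * vma is executable, so no stale instructions can be fetched later).
 */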
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

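/*
 * Called when a PTE is installed: if the page's kernel-side view is
 * still marked dirty (PG_dcache_clean unset), write it back and
 * invalidate so the new user mapping starts out coherent.
 */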
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

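/*
 * Keep the kernel and user views of an anonymous page coherent when
 * they alias: purge whichever cache colour may still hold stale or
 * dirty lines for this page.
 */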
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

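/*
 * The generic flush_*() entry points below fan the corresponding
 * local_flush_*() op out to every online CPU via cacheop_on_each_cpu().
 * The mm-wide flushes short-circuit when the D-cache has no aliases,
 * since there is nothing to keep coherent in that case.
 */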
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

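/*
 * Work out how many distinct page colours a cache can hold: the index
 * bits of one way that lie above PAGE_SHIFT form alias_mask, and
 * n_aliases is the number of pages that can alias. For example, a
 * 16KiB way (512 sets, entry_shift of 5) with 4KiB pages gives
 * alias_mask = 0x3000 and n_aliases = 4, while a way no larger than a
 * page gives alias_mask = 0 and n_aliases = 0.
 */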
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

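	/*
	 * Each family's cache init is declared __weak so the reference
	 * resolves even on kernels where that family's cache code isn't
	 * built; the family checks ensure only the implementation that
	 * actually exists for this CPU gets called.
	 */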
	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}