/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

void __init p3_cache_init(void)
{
	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
}

#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#endif

/*
 * The following group of functions deals with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}
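
/*
 * Typical usage, as in sh64_dcache_purge_coloured_phy_page() below (a
 * sketch of the pattern already used in this file, not a new API):
 *
 *	sh64_setup_dtlb_cache_slot(eaddr, get_asid(), paddr);
 *	... ocbp/ocbi loop over the temporary mapping ...
 *	sh64_teardown_dtlb_cache_slot();
 *
 * Interrupts stay off for the whole bracket, so the single wired slot
 * cannot be claimed twice.
 */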

#ifndef CONFIG_ICACHE_DISABLED
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	   the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, it
	   would be cheaper to use the selective code for a large range than is
	   possible with the D-cache.  Just assume 64 for now as a working
	   figure.
	*/
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || aligned_start < vma->vm_start) {
				/* Avoid getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

/*
 * Invalidate a small range of user context I-cache, not necessarily page
 * (or even cache-line) aligned.
 *
 * Since this is used inside ptrace, the ASID in the mm context typically
 * won't match current_asid.  We'll have to switch ASID to do this.  For
 * safety, and given that the range will be small, do all this under cli.
 *
 * Note, there is a hazard that the ASID in mm->context is no longer
 * actually associated with mm, i.e. if the mm->context has started a new
 * cycle since mm was last active.  However, this is just a performance
 * issue: all that happens is that we invalidate lines belonging to
 * another mm, so the owning process has to refill them when that mm goes
 * live again.  mm itself can't have any cache entries because there will
 * have been a flush_cache_all when the new mm->context cycle started.
 */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
					     unsigned long start, int len)
{
	unsigned long long eaddr;
	unsigned long long eaddr_end;
	unsigned long current_asid, mm_asid;
	unsigned long flags;

	/*
	 * Align down to the start of the enclosing cache line.  Otherwise,
	 * suppose len==8 and start was at 32N+28 : the last 4 bytes wouldn't
	 * get invalidated.
	 */
	eaddr = start & ~(unsigned long long)(L1_CACHE_BYTES - 1);
	eaddr_end = start + len;

	mm_asid = cpu_asid(smp_processor_id(), mm);
	local_irq_save(flags);
	current_asid = switch_and_save_asid(mm_asid);

	while (eaddr < eaddr_end) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
		eaddr += L1_CACHE_BYTES;
	}
	switch_and_save_asid(current_asid);
	local_irq_restore(flags);
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active in which case we might just
	   invalidate another process's I-cache entries : no worries, just a
	   performance drop for them. */
	aligned_start = start & ~(unsigned long long)(L1_CACHE_BYTES - 1);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_ICACHE_DISABLED */

#ifndef CONFIG_DCACHE_DISABLED
/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
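
/*
 * Worked example (illustrative values, not a fixed SH-5 configuration):
 * with L1_CACHE_BYTES = 32 this comes to (32 << 10) + 4096 = 36kB,
 * enough to cover a 32kB D-cache plus a page of slack for the buffer's
 * own base offset within a set.
 */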

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed.  */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
			cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
			cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				ctrl_inb(eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address (and the (2**n_synbits) pages up from it) aren't
   used for anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL
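
/*
 * Worked example (assuming 4kB pages and CACHE_OC_N_SYNBITS = 2, i.e.
 * four page colours - an illustration, not a fixed configuration): the
 * magic region spans four consecutive pages, one of each colour, and
 * 'eaddr & CACHE_OC_SYN_MASK' picks out the page whose colour matches
 * the original mapping.
 */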

/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
						unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}
|  | 442 |  | 
| Hugh Dickins | 60ec558 | 2005-10-29 18:16:34 -0700 | [diff] [blame] | 443 | static void sh64_dcache_purge_user_pages(struct mm_struct *mm, | 
|  | 444 | unsigned long addr, unsigned long end) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 445 | { | 
|  | 446 | pgd_t *pgd; | 
| Paul Mundt | 38350e0 | 2008-02-13 20:14:10 +0900 | [diff] [blame] | 447 | pud_t *pud; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 448 | pmd_t *pmd; | 
|  | 449 | pte_t *pte; | 
|  | 450 | pte_t entry; | 
| Hugh Dickins | 60ec558 | 2005-10-29 18:16:34 -0700 | [diff] [blame] | 451 | spinlock_t *ptl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 452 | unsigned long paddr; | 
|  | 453 |  | 
| Hugh Dickins | 60ec558 | 2005-10-29 18:16:34 -0700 | [diff] [blame] | 454 | if (!mm) | 
|  | 455 | return; /* No way to find physical address of page */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 456 |  | 
| Hugh Dickins | 60ec558 | 2005-10-29 18:16:34 -0700 | [diff] [blame] | 457 | pgd = pgd_offset(mm, addr); | 
|  | 458 | if (pgd_bad(*pgd)) | 
|  | 459 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 460 |  | 
| Paul Mundt | 38350e0 | 2008-02-13 20:14:10 +0900 | [diff] [blame] | 461 | pud = pud_offset(pgd, addr); | 
|  | 462 | if (pud_none(*pud) || pud_bad(*pud)) | 
|  | 463 | return; | 
|  | 464 |  | 
|  | 465 | pmd = pmd_offset(pud, addr); | 
| Hugh Dickins | 60ec558 | 2005-10-29 18:16:34 -0700 | [diff] [blame] | 466 | if (pmd_none(*pmd) || pmd_bad(*pmd)) | 
|  | 467 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 468 |  | 
| Hugh Dickins | 60ec558 | 2005-10-29 18:16:34 -0700 | [diff] [blame] | 469 | pte = pte_offset_map_lock(mm, pmd, addr, &ptl); | 
|  | 470 | do { | 
|  | 471 | entry = *pte; | 
|  | 472 | if (pte_none(entry) || !pte_present(entry)) | 
|  | 473 | continue; | 
|  | 474 | paddr = pte_val(entry) & PAGE_MASK; | 
|  | 475 | sh64_dcache_purge_coloured_phy_page(paddr, addr); | 
|  | 476 | } while (pte++, addr += PAGE_SIZE, addr != end); | 
|  | 477 | pte_unmap_unlock(pte - 1, ptl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 478 | } | 

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *	special-case code in tlbmiss critical path), or map the page in
 *	advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *	every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

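	/*
	 * The PMD_MASK test catches a range that straddles a page-table
	 * page: sh64_dcache_purge_user_pages() walks only a single PTE
	 * page, so any wider range takes the full-purge path instead.
	 */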
	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}

/*
 * Purge the range of addresses from the D-cache.
 *
 * The addresses lie in the superpage mapping.  There's no harm if we
 * overpurge at either end - just a small performance loss.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr <= ullend) {
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

void __flush_wback_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr < ullend) {
		__asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

void __flush_invalidate_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr < ullend) {
		__asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_DCACHE_DISABLED */

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
void flush_cache_all(void)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.  This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
		      unsigned long pfn)
{
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_purge_region((void *)start, end - start);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * Flush the range of user (defined by vma->vm_mm) address space starting
 * at 'addr' for 'len' bytes from the cache.  The range does not straddle
 * a page boundary, the unique physical page containing the range is
 * 'page'.  This seems to be used mainly for invalidating an address
 * range following a poke into the program text through the ptrace() call
 * from another process (e.g. for BRK instruction insertion).
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			struct page *page, unsigned long addr, int len)
{
	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	mb();

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
}

/*
 * For the L1_CACHE_BYTES bytes starting at 'vaddr', write back the data
 * from the D-cache and invalidate the corresponding region of the
 * I-cache for the current process.  Used to flush signal trampolines on
 * the stack to make them executable.
 */
void flush_cache_sigtramp(unsigned long vaddr)
{
	unsigned long end = vaddr + L1_CACHE_BYTES;

	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range(vaddr, end);
}

#ifdef CONFIG_MMU
/*
 * These *MUST* lie in an area of virtual address space that's otherwise
 * unused.
 */
#define UNIQUE_EADDR_START 0xe0000000UL
#define UNIQUE_EADDR_END   0xe8000000UL

/*
 * Given a physical address paddr, and a user virtual address user_eaddr
 * which will eventually be mapped to it, create a one-off kernel-private
 * eaddr mapped to the same paddr.  This is used for creating special
 * destination pages for copy_user_page and clear_user_page.
 */
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
					    unsigned long paddr)
{
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
				(user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}

static void sh64_copy_user_page_coloured(void *to, void *from,
					 unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing cache entries of the wrong colour.  These are
	 * present quite often, if the kernel has recently used the page
	 * internally, then given it up, then it's been allocated to the user.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	copy_page(coloured_to, from);

	sh64_teardown_dtlb_cache_slot();
}

static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing kernel-originated lines of the wrong
	 * colour (as above)
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	clear_page(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

/*
 * 'from' and 'to' are kernel virtual addresses (within the superpage
 * mapping of the physical RAM).  'address' is the user virtual address
 * where the copy 'to' will be mapped after.  This allows a custom
 * mapping to be used to ensure that the new copy is placed in the
 * right cache sets for the user to see it without having to bounce it
 * out via memory.  Note however : the call to flush_page_to_ram in
 * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
 * very important case!
 *
 * TBD : can we guarantee that on every call, any cache entries for
 * 'from' are in the same colour sets as 'address' also?  i.e. is this
 * always used just to deal with COW?  (I suspect not).
 *
 * There are two possibilities here for when the page 'from' was last accessed:
 * - by the kernel : this is OK, no purge required.
 * - by the/a user (e.g. for break_COW) : need to purge.
 *
 * If the potential user mapping at 'address' is the same colour as
 * 'from' there is no need to purge any cache lines from the 'from'
 * page mapped into cache sets of colour 'address'.  (The copy will be
 * accessing the page through 'from').
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		copy_page(to, from);
	else
		sh64_copy_user_page_coloured(to, from, address);
}

/*
 * 'to' is a kernel virtual address (within the superpage mapping of the
 * physical RAM).  'address' is the user virtual address where the 'to'
 * page will be mapped after.  This allows a custom mapping to be used to
 * ensure that the new copy is placed in the right cache sets for the
 * user to see it without having to bounce it out via memory.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		clear_page(to);
	else
		sh64_clear_user_page_coloured(to, address);
}
#endif