/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2002 Benedict Gaster
 * Copyright (C) 2003 Richard Curnow
 * Copyright (C) 2003 - 2008 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

void __init cpu_cache_init(void)
{
	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
}

void __init kmap_coherent_init(void)
{
	/* XXX ... */
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	/* XXX ... */
	return NULL;
}

void kunmap_coherent(void)
{
}

#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#endif

/*
 * The following group of functions deals with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}
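
/*
 * Callers pair these helpers around a purge through the wired slot,
 * roughly as follows (illustrative sketch only; see the users further
 * down in this file):
 *
 *	sh64_setup_dtlb_cache_slot(alias_eaddr, get_asid(), paddr);
 *	... ocbp/alloco through alias_eaddr ...
 *	sh64_teardown_dtlb_cache_slot();
 *
 * IRQs stay off between setup and teardown, so the wired DTLB entry
 * cannot be repurposed underneath the caller.
 */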

#ifndef CONFIG_ICACHE_DISABLED
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, it pays
	   to use the selective code for a larger range than is worthwhile
	   with the D-cache.  Just assume 64 pages for now as a working
	   figure.
	   */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start < vma->vm_start)) {
				/* Unmapped hole: skip a page rather than
				   getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

/*
 * Invalidate a small range of user context I-cache, not necessarily page
 * (or even cache-line) aligned.
 *
 * Since this is used inside ptrace, the ASID in the mm context typically
 * won't match current_asid.  We'll have to switch ASID to do this.  For
 * safety, and given that the range will be small, do all this under cli.
 *
 * Note, there is a hazard that the ASID in mm->context is no longer
 * actually associated with mm, i.e. if the mm->context has started a new
 * cycle since mm was last active.  However, this is just a performance
 * issue: all that happens is that we invalidate lines belonging to
 * another mm, so the owning process has to refill them when that mm goes
 * live again.  mm itself can't have any cache entries because there will
 * have been a flush_cache_all when the new mm->context cycle started.
 */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
					     unsigned long start, int len)
{
	unsigned long long eaddr = start;
	unsigned long long eaddr_end = start + len;
	unsigned long current_asid, mm_asid;
	unsigned long flags;
	unsigned long long epage_start;

	/*
	 * Align down to the start of the enclosing cache line.  Otherwise,
	 * suppose len==8 and start was at 32N+28 : the last 4 bytes wouldn't
	 * get invalidated.
	 */
	eaddr = start & ~(L1_CACHE_BYTES - 1);
	eaddr_end = start + len;

	mm_asid = cpu_asid(smp_processor_id(), mm);
	local_irq_save(flags);
	current_asid = switch_and_save_asid(mm_asid);

	epage_start = eaddr & PAGE_MASK;

	while (eaddr < eaddr_end) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
		eaddr += L1_CACHE_BYTES;
	}
	switch_and_save_asid(current_asid);
	local_irq_restore(flags);
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active in which case we might just
	   invalidate another process's I-cache entries : no worries, just a
	   performance drop for him. */
	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_ICACHE_DISABLED */

#ifndef CONFIG_DCACHE_DISABLED
/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
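
/*
 * Sizing note (an assumption, not spelled out in the original comment):
 * with 32-byte cache lines this evaluates to 32KB + 4KB, i.e. presumably
 * the full size of the SH-5 operand cache plus a page of slack, so the
 * set-offset arithmetic in sh64_dcache_purge_sets() below always stays
 * inside the array.
 */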

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed.  */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco");	/* TAKum03020 */
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address and the (2**n_synbits) pages up from it aren't
   used for anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL
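
/*
 * The (1 << CACHE_OC_N_SYNBITS) pages starting at MAGIC_PAGE0_START are
 * used by the purge routines below as colour-specific aliases: each one
 * is mapped in turn onto the target physical page via the wired DTLB
 * slot, and the lines of the matching colour are then pushed out with
 * ocbp through that alias.
 */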

/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
						unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *	special-case code in tlbmiss critical path), or map the page in
 *	advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *	every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}
#endif /* !CONFIG_DCACHE_DISABLED */

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
void flush_cache_all(void)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.  This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
		      unsigned long pfn)
{
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_purge_region((void *)start, end);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * Flush the range of user (defined by vma->vm_mm) address space starting
 * at 'addr' for 'len' bytes from the cache.  The range does not straddle
 * a page boundary, the unique physical page containing the range is
 * 'page'.  This seems to be used mainly for invalidating an address
 * range following a poke into the program text through the ptrace() call
 * from another process (e.g. for BRK instruction insertion).
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			struct page *page, unsigned long addr, int len)
{
	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	mb();

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
}

/*
 * For the L1_CACHE_BYTES-sized region starting at 'vaddr', write back
 * the data from the D-cache and invalidate the corresponding region of
 * the I-cache for the current process.  Used to flush signal
 * trampolines on the stack to make them executable.
 */
void flush_cache_sigtramp(unsigned long vaddr)
{
	unsigned long end = vaddr + L1_CACHE_BYTES;

	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range(vaddr, end);
}

#ifdef CONFIG_MMU
/*
 * These *MUST* lie in an area of virtual address space that's otherwise
 * unused.
 */
#define UNIQUE_EADDR_START 0xe0000000UL
#define UNIQUE_EADDR_END 0xe8000000UL
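
/*
 * This gives a 128MB window of kernel-private effective addresses.
 * sh64_make_unique_eaddr() below hands out colour-matched aliases from
 * this window, advancing by (PAGE_SIZE << CACHE_OC_N_SYNBITS) per call
 * and purging the whole D-cache before wrapping back to the start.
 */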

/*
 * Given a physical address paddr, and a user virtual address user_eaddr
 * which will eventually be mapped to it, create a one-off kernel-private
 * eaddr mapped to the same paddr.  This is used for creating special
 * destination pages for copy_user_page and clear_user_page.
 */
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
					    unsigned long paddr)
{
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
				(user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}

static void sh64_copy_user_page_coloured(void *to, void *from,
					 unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing cache entries of the wrong colour.  These are
	 * present quite often, if the kernel has recently used the page
	 * internally, then given it up, then it's been allocated to the user.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	copy_page(coloured_to, from);

	sh64_teardown_dtlb_cache_slot();
}

static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing kernel-originated lines of the wrong
	 * colour (as above)
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	clear_page(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

/*
 * 'from' and 'to' are kernel virtual addresses (within the superpage
 * mapping of the physical RAM).  'address' is the user virtual address
 * where the copy 'to' will be mapped after.  This allows a custom
 * mapping to be used to ensure that the new copy is placed in the
 * right cache sets for the user to see it without having to bounce it
 * out via memory.  Note however : the call to flush_page_to_ram in
 * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
 * very important case!
 *
 * TBD : can we guarantee that on every call, any cache entries for
 * 'from' are in the same colour sets as 'address' also?  i.e. is this
 * always used just to deal with COW?  (I suspect not).
 *
 * There are two possibilities here for when the page 'from' was last accessed:
 * - by the kernel : this is OK, no purge required.
 * - by the/a user (e.g. for break_COW) : need to purge.
 *
 * If the potential user mapping at 'address' is the same colour as
 * 'from' there is no need to purge any cache lines from the 'from'
 * page mapped into cache sets of colour 'address'.  (The copy will be
 * accessing the page through 'from').
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		copy_page(to, from);
	else
		sh64_copy_user_page_coloured(to, from, address);
}

/*
 * 'to' is a kernel virtual address (within the superpage mapping of the
 * physical RAM).  'address' is the user virtual address where the 'to'
 * page will be mapped after.  This allows a custom mapping to be used to
 * ensure that the new copy is placed in the right cache sets for the
 * user to see it without having to bounce it out via memory.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		clear_page(to);
	else
		sh64_clear_user_page_coloured(to, address);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
#endif