blob: 97f5a64c2ab8c754ad6dfe33e7b068cf4c60f628 [file] [log] [blame]
/*
 * include/asm-sh/cpu-sh3/cacheflush.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH3_CACHEFLUSH_H
#define __ASM_CPU_SH3_CACHEFLUSH_H

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
 *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
 *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
 *
 *  Caches are indexed (effectively) by physical address on SH-3, so
 *  we don't need them.
 */

#if defined(CONFIG_SH7705_CACHE_32KB)

/*
 * SH7705 is an SH3 processor with a 32KB cache.  This has alias issues
 * like the SH4.  Unlike the SH4 this is a unified cache, so we need to
 * do some work in mmap when 'exec'ing a new binary.
 */

/* 32KB cache, 4KB page size: aliasing is decided by bit 12 of the
 * virtual address, hence the mask below. */
#define CACHE_ALIAS 0x00001000

/* Page flag used to mark a page as having been mapped into the cache. */
#define PG_mapped	PG_arch_1

void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end);
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn);
void flush_dcache_page(struct page *pg);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#else
/*
 * Plain SH-3 caches are (effectively) physically indexed, so no flushing
 * is required at all: every operation is a no-op.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#endif

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* SH3 has unified cache so no special action needed here */
#define flush_cache_sigtramp(vaddr)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)

#define p3_cache_init()				do { } while (0)

/*
 * We provide our own get_unmapped_area to avoid cache aliasing issues
 * on SH7705 with a 32KB cache, and to page align addresses in the
 * non-aliasing case.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */