/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/mman.h>
#include <asm/glue.h>
#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
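/*
 * Worked example (illustrative only, assuming 4K pages so
 * PAGE_SHIFT == 12, and SHMLBA == 16K): CACHE_COLOUR(0x5000) ==
 * (0x5000 & 0x3fff) >> 12 == 1, i.e. the address has the second of
 * four possible cache colours.  Two mappings of the same page can
 * only alias in a VIPT cache if their colours differ.
 */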

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_V6)
# define MULTI_CACHE 1
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
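/*
 * How the selection above plays out (illustrative): a kernel built for
 * only CONFIG_CPU_ARM926T gets _CACHE == arm926, and the __glue()
 * macros below resolve every operation at compile time, e.g.
 * __cpuc_flush_kern_all -> arm926_flush_kern_cache_all.  Enable a
 * second CPU with a different cache model and MULTI_CACHE gets defined
 * instead, turning each call into an indirect jump through the
 * cpu_cache function table.
 */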

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and their required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
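/*
 * Usage sketch (hypothetical driver code, not an interface defined
 * here): the rule of thumb for the DMA helpers above is
 *
 *	dmac_clean_range(buf, buf + len);   -- CPU wrote, device will read
 *	... start DMA to the device ...
 *
 *	dmac_inv_range(buf, buf + len);     -- device wrote, CPU will read
 *	... read the received data ...
 *
 * In-tree code should reach these through the dma-mapping API rather
 * than calling them directly.
 */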

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

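/*
 * Illustrative expansion, assuming a v4wb-only build (_CACHE == v4wb):
 * __glue() pastes the two halves together, so __cpuc_flush_kern_all
 * becomes v4wb_flush_kern_cache_all, which the extern declarations
 * below then cover and arch/arm/mm/cache-v4wb.S implements.
 */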
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);

#endif

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
		flush_dcache_page(page);			\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)
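/*
 * Why the ordering above matters (sketch): flush_cache_page() first
 * pushes out any dirty user-mapping lines so the memcpy() through the
 * kernel mapping operates on current data; copy_to_user_page() then
 * calls flush_dcache_page() so the newly written bytes become visible
 * to the user mapping.  The classic caller is access_process_vm(),
 * e.g. ptrace planting a breakpoint in another process's text.
 */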

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
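/*
 * Typical use (illustrative): anything that writes instructions
 * through a kernel mapping and then executes them - the module
 * loader, for instance - must call
 *
 *	flush_icache_range(addr, addr + size);
 *
 * between the write and the jump, so the Icache cannot fetch stale
 * lines on Harvard-cache CPUs.
 */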

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

#define __cacheid_present(val)		(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)		((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)		((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
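/*
 * How the tests above decode the CP15 cache type register (a sketch of
 * this code's interpretation, not a full architectural description):
 * bits 28:25 hold the cache type field, and only the value 14 (0xe,
 * the ARMv6 format) is treated as VIPT; everything else is assumed
 * VIVT.  Bit 23 is the D-cache "P" bit, set when the cache is large
 * enough to alias, so a VIPT cache with bit 23 clear needs no page
 * colouring.
 */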

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()					\
	({								\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
		__cacheid_vipt_nonaliasing(__val);			\
	})

#define cache_is_vipt_aliasing()					\
	({								\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
		__cacheid_vipt_aliasing(__val);				\
	})

#else

#define cache_is_vivt()							\
	({								\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
		(!__cacheid_present(__val)) || __cacheid_vivt(__val);	\
	})

#define cache_is_vipt()							\
	({								\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
		__cacheid_present(__val) && __cacheid_vipt(__val);	\
	})

#define cache_is_vipt_nonaliasing()					\
	({								\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
		__cacheid_present(__val) &&				\
		 __cacheid_vipt_nonaliasing(__val);			\
	})

#define cache_is_vipt_aliasing()					\
	({								\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
		__cacheid_present(__val) &&				\
		 __cacheid_vipt_aliasing(__val);			\
	})

#endif

#endif