/*
 * arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#include <asm/rodata.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
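
/*
 * Added note (illustrative, not from the original header): CACHE_COLOUR()
 * yields the alias "colour" of a virtual address, i.e. which page slot it
 * occupies within the SHMLBA alias window.  Assuming SHMLBA is
 * 4 * PAGE_SIZE (the usual value for aliasing VIPT caches) and 4K pages,
 * colours run 0..3:
 *
 *	CACHE_COLOUR(0x00001000) == 1
 *	CACHE_COLOUR(0x00004000) == 0
 *
 * Two virtual mappings of the same physical page can only share cache
 * lines safely in an aliasing VIPT cache when their colours match.
 */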

#define PG_dcache_clean PG_arch_1

/*
 * MM Cache Management
 * ===================
 *
 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 * implement these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/cachetlb.txt for more information.
 * Please note that the implementation of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page at kaddr is
 *		written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 * DMA Cache Coherency
 * ===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
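
/*
 * Added worked example (illustrative, assuming a 32-byte cache line):
 * the dma_inv_range() alignment rule above exists because a partial
 * line at either end of the range may still hold unrelated live data.
 * With start = 0x1008 and end = 0x1078, the lines at 0x1020 and 0x1040
 * lie entirely inside the range and may simply be discarded, but the
 * lines at 0x1000 and 0x1060 straddle the range boundaries and must be
 * written back first so the neighbouring bytes are not lost.
 */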

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
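
/*
 * Added sketch (hypothetical, for illustration only): on a MULTI_CACHE
 * kernel each cache type supplies one of these tables, and boot-time
 * processor selection installs the matching one as cpu_cache.  In C
 * terms it looks roughly like the fragment below; in the real kernel
 * the tables are emitted by asm macros in arch/arm/mm/cache-*.S:
 *
 *	static const struct cpu_cache_fns example_v7_cache_fns = {
 *		.flush_icache_all	= v7_flush_icache_all,
 *		.flush_kern_all		= v7_flush_kern_cache_all,
 *		.coherent_kern_range	= v7_coherent_kern_range,
 *		...
 *	};
 */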

#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
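
/*
 * Added sketch (illustrative, not from the original header): inside the
 * arch dma-mapping code, the map/unmap hooks bracket a streaming DMA
 * transfer roughly like this.  Drivers must use dma_map_single() and
 * friends, never these ops directly:
 *
 *	dmac_map_area(buf, len, DMA_TO_DEVICE);
 *	...device reads from buf...
 *	dmac_unmap_area(buf, len, DMA_TO_DEVICE);
 *
 * For DMA_TO_DEVICE the map step cleans the range so the device sees
 * the CPU's data; for DMA_FROM_DEVICE the unmap step invalidates the
 * range so the CPU sees the device's data rather than stale lines.
 */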

extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
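
/*
 * Added note (commentary, not from the original header): the asymmetry
 * above is deliberate.  Reading from a user page needs no cache
 * maintenance, so copy_from_user_page() is a plain memcpy.  Writing
 * into one, as ptrace does when planting a breakpoint in another
 * task's text, can leave the icache stale, so copy_to_user_page() is a
 * real function that copies and then repairs I/D coherency for the
 * target address.
 */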

/* ICIALLU: invalidate the entire icache (to the point of unification) */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* ICIALLUIS: as above, but across the inner shareable domain (v7 SMP) */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Choose an implementation: kernels that mix v6/v7 cores or are built
 * SMP_ON_UP cannot pick a single instruction at compile time and must
 * go through __cpuc_flush_icache_all, as must v6 kernels that need the
 * erratum 411920 workaround; everyone else gets the cp15 op directly.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}

#define flush_cache_all()		__cpuc_flush_kern_all()

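/*
 * Added note (commentary, not from the original header): the vivt_*
 * helpers below are only correct for VIVT caches, where lines are
 * tagged by virtual address and become stale the moment the page
 * tables change.  They flush only if the current CPU has run the mm
 * (the mm_cpumask test); on VIPT hardware the out-of-line variants
 * declared further down are used instead.
 */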
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

#define flush_cache_user_range(start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
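
/*
 * Added example (illustrative, not from the original header): any code
 * the kernel writes and then executes, e.g. module text, kprobes or
 * runtime patching, must pass through flush_icache_range() before the
 * first instruction fetch, or a stale icache may execute the old bytes:
 *
 *	memcpy(dst, new_insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */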

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
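
/*
 * Added note (illustrative, not from the original header): the two
 * helpers above support I/O done through vmap()/vmalloc() aliases of
 * page-cache pages (see Documentation/cachetlb.txt).  The expected
 * pattern in a caller is:
 *
 *	flush_kernel_vmap_range(vaddr, len);	   before the I/O
 *	...block layer or device performs the I/O...
 *	invalidate_kernel_vmap_range(vaddr, len);  after I/O that wrote
 *						   to memory
 *
 * On aliasing caches both resolve to a clean + invalidate of the range;
 * on non-aliasing caches they are no-ops.
 */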

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * flush_icache_page can be a no-op here: the coherency work for pages
 * the kernel has written is already done by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

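/*
 * Added note (commentary, not from the original header): on a VIVT or
 * aliasing-VIPT cache, a fresh vmap alias may conflict with lines left
 * behind by other mappings of the same pages, so everything is flushed
 * below.  A non-aliasing VIPT cache needs no flush; flush_cache_vmap()
 * only issues a dsb() so the new PTE writes are observable before the
 * mapping is used.
 */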
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif