/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

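/*
 * Memory type given to a user pte when we must defeat cache aliasing.
 * The default, "bufferable", disables the cache but keeps the write
 * buffer enabled; check_writebuffer_bugs() downgrades this to fully
 * uncached if the write buffer turns out to alias on physical
 * addresses as well.
 */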
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
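		/*
		 * Write back and invalidate any cache lines for this
		 * page before the memory type changes, so no dirty
		 * data is stranded in a cacheable alias; the outer
		 * (L2) cache needs the same treatment.
		 */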
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * table lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

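	/*
	 * Walk the page tables down to the pte.  If any level is
	 * absent (or bad), there is no pte to adjust.
	 */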
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

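	/* Offset of the faulting page within the mapped object. */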
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
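	/*
	 * If any other mappings of this page were made uncacheable,
	 * the faulting pte must be given the same memory type too.
	 */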
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

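	/* Nothing to do if there is no struct page for this pfn. */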
	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
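	/*
	 * If the page was dirtied via its kernel mapping, write
	 * those cache lines back before userspace can see stale
	 * data through a differently-indexed alias.
	 */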
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
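		/*
		 * VIVT caches can alias between mappings, so fix up
		 * all other user mappings of this page; on VIPT, only
		 * the I-cache may be stale for executable pages.
		 */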
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

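	/*
	 * p1 and p2 are two virtual aliases of the same physical
	 * word.  Write 1 through one alias, 0 through the other,
	 * then read back: a coherent write buffer returns the last
	 * value written (0); reading back 1 means the buffer did
	 * not order the two writes by physical address.
	 */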
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

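		/*
		 * Map the page at two different virtual addresses
		 * with the same "bufferable" memory type that shared
		 * user mappings receive.
		 */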
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}