/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
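
/*
 * Typical driver usage - an illustrative sketch only; the bus address,
 * size and register offsets below are made-up example values:
 *
 *	void __iomem *regs = ioremap(0x40000000, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 *
 * The returned cookie must only be used with accessors such as
 * readl/writel/memcpy_toio, never dereferenced directly.
 */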
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

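/*
 * The kernel's ioremap/vmalloc mappings are created in init_mm and copied
 * into each task's page tables lazily: init_mm.context.kvm_seq ("kvm" here
 * means kernel virtual memory, not the hypervisor) is bumped whenever those
 * mappings change at the PGD/PMD level, and a task whose own kvm_seq is
 * stale calls in here to re-copy the vmalloc-area PGD entries from init_mm.
 */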
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

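/*
 * Supersections are 16MB mappings available on ARMv6+ (with the extended
 * page table format) and XScale3.  A supersection must be replicated in
 * 16 consecutive first-level entries, hence the 8 iterations below writing
 * two pmd slots each.  Bits [23:20] of the descriptor carry physical
 * address bits [35:32] - the ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20
 * term - which is what allows devices above 4GB to be reached.
 */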
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

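/*
 * Core of ioremap: pick the largest mapping granule the region allows.
 * On non-SMP builds, suitably aligned regions are mapped with 16MB
 * supersections (ARMv6 with extended page tables, or XSC3; required for
 * physical addresses above 4GB) or with pairs of 1MB sections; everything
 * else, and all SMP builds, falls back to ordinary 4K page mappings via
 * ioremap_page_range().
 */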
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
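
/*
 * __arm_ioremap_pfn() takes a page frame number rather than a physical
 * address, so it can map devices whose physical address does not fit in
 * an unsigned long.  An illustrative sketch with made-up values - pfn
 * 0x100000 is physical address 0x1_0000_0000, i.e. just above 4GB, which
 * must be supersection aligned (see __arm_ioremap_pfn_caller() above):
 *
 *	void __iomem *regs = __arm_ioremap_pfn(0x100000, 0, SZ_16M, MT_DEVICE);
 *	if (!regs)
 *		return -ENOMEM;
 */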

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
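
/*
 * The ioremap()/ioremap_nocache()/ioremap_wc() interfaces that drivers use
 * are, roughly speaking, thin macro wrappers around __arm_ioremap() in
 * asm/io.h, passing MT_DEVICE, MT_DEVICE and MT_DEVICE_WC respectively;
 * iounmap() similarly resolves to __iounmap() below.
 */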

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);