/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
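
/*
 * Usage sketch (illustration only, not part of the original file): callers
 * pass a page frame number rather than a physical address, so an address
 * has to be shifted down first, e.g.:
 *
 *      if (page_is_ram(phys_addr >> PAGE_SHIFT))
 *              // treat the page as normal RAM, not as an ioremap candidate
 */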

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                        (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                        pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
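
/*
 * Usage sketch (illustration only, not part of the original file): a PCI
 * driver would typically map a BAR, poke registers through the mmio
 * helpers and tear the mapping down again.  The register offsets and the
 * 4096-byte length below are made-up values.
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0), 4096);
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + 0x10);         // hypothetical control register
 *      status = readl(regs + 0x14);    // hypothetical status register
 *      iounmap(regs);
 */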

/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
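
/*
 * Usage sketch (illustration only, not part of the original file): a
 * framebuffer driver might prefer a write-combined mapping of its video
 * aperture; fb_base and fb_len stand in for values read from the device.
 *
 *      info->screen_base = ioremap_wc(fb_base, fb_len);
 *      if (!info->screen_base)
 *              return -ENOMEM;
 */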

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
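
/*
 * Usage sketch (illustration only, not part of the original file): a
 * cached (write-back) mapping suits memory-like regions such as firmware
 * tables where ordinary caching is safe; table_phys and table_len are
 * placeholders.
 *
 *      tbl = ioremap_cache(table_phys, table_len);
 */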

static void __iomem *ioremap_default(resource_size_t phys_addr,
                                        unsigned long size)
{
        unsigned long flags;
        void *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = (void *) __ioremap_caller(phys_addr, size, flags,
                                        __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return (void __iomem *)ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
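
/*
 * Usage sketch (illustration only, not part of the original file):
 * ioremap_prot() takes a full protection value, but only the cache
 * attribute bits (_PAGE_CACHE_MASK) are honoured here; a caller following
 * a VMA's page protection might do something like:
 *
 *      maddr = ioremap_prot(phys, PAGE_SIZE, pgprot_val(vma->vm_page_prot));
 */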

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}
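
/*
 * Usage sketch (illustration only, not part of the original file): the
 * /dev/mem read path uses these two as a pair so that non-RAM pages get a
 * temporary mapping; error handling is omitted here.
 *
 *      ptr = xlate_dev_mem_ptr(p);
 *      if (ptr) {
 *              copy_to_user(buf, ptr, sz);
 *              unxlate_dev_mem_ptr(p, ptr);
 *      }
 */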

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                        FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;
        WARN(1, KERN_WARNING
             "Debug warning: early ioremap leak of %d areas detected.\n",
                early_ioremap_nested);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        if (WARN_ON(nesting < 0))
                return;

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
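
/*
 * Usage sketch (illustration only, not part of the original file): boot
 * code that must look at a firmware table before the normal ioremap()
 * machinery is available maps it through these fixmap-based helpers;
 * table_phys, table_len and parse_table() are hypothetical.
 *
 *      tbl = early_ioremap(table_phys, table_len);
 *      if (tbl) {
 *              parse_table(tbl);
 *              early_iounmap(tbl, table_len);
 *      }
 */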

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}