/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640K-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

static inline int phys_addr_valid(resource_size_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	return !(addr >> boot_cpu_data.x86_phys_bits);
#else
	return 1;
#endif
}

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);
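
/*
 * Illustrative note (a sketch, not part of the original file): on this
 * kernel generation __phys_addr() is the out-of-line helper behind the
 * __pa() macro, so a virtual-to-physical conversion of a direct-mapped
 * kernel pointer "buf" (hypothetical name) reads:
 *
 *	phys_addr_t phys = __pa(buf);
 */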

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (!phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	if (x >= FIXADDR_START)
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif
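
/*
 * Illustrative note (a sketch, not part of the original file):
 * __virt_addr_valid() backs the virt_addr_valid() macro, typically used
 * to guard conversions on pointers of uncertain origin:
 *
 *	if (virt_addr_valid(ptr))
 *		page = virt_to_page(ptr);
 */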

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640KB->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
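
/*
 * Illustrative sketch (not part of the original file): page_is_ram()
 * takes a page frame number, so a caller starting from a physical
 * address shifts it down first:
 *
 *	if (page_is_ram(phys_addr >> PAGE_SHIFT))
 *		printk(KERN_INFO "address is E820 RAM\n");
 */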

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
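
/*
 * Illustrative note (a sketch, not part of the original file): the point
 * of ioremap_change_attr() is that a physical range covered by the kernel
 * direct map must not also be mapped with a second, conflicting cache
 * attribute. Callers therefore pass the direct-map alias, roughly:
 *
 *	ioremap_change_attr((unsigned long) __va(phys_addr), size,
 *			    _PAGE_CACHE_WC);
 */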

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than one BAR in the iomem
	 * resource tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
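
/*
 * Illustrative sketch (not part of the original file): a typical driver
 * maps a device BAR uncached, uses the mmio helpers on the returned
 * cookie, and unmaps it when done. "pdev" and MY_CTRL_REG are
 * hypothetical names:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MY_CTRL_REG);
 *	iounmap(regs);
 */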
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 328 | |
venkatesh.pallipadi@intel.com | b310f38 | 2008-03-18 17:00:24 -0700 | [diff] [blame] | 329 | /** |
| 330 | * ioremap_wc - map memory into CPU space write combined |
| 331 | * @offset: bus address of the memory |
| 332 | * @size: size of the resource to map |
| 333 | * |
| 334 | * This version of ioremap ensures that the memory is marked write combining. |
| 335 | * Write combining allows faster writes to some hardware devices. |
| 336 | * |
| 337 | * Must be freed with iounmap. |
| 338 | */ |
venkatesh.pallipadi@intel.com | d639bab | 2009-01-09 16:13:13 -0800 | [diff] [blame] | 339 | void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) |
venkatesh.pallipadi@intel.com | b310f38 | 2008-03-18 17:00:24 -0700 | [diff] [blame] | 340 | { |
Andreas Herrmann | 499f8f8 | 2008-06-10 16:06:21 +0200 | [diff] [blame] | 341 | if (pat_enabled) |
Christoph Lameter | 2301696 | 2008-04-28 02:12:42 -0700 | [diff] [blame] | 342 | return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, |
| 343 | __builtin_return_address(0)); |
venkatesh.pallipadi@intel.com | b310f38 | 2008-03-18 17:00:24 -0700 | [diff] [blame] | 344 | else |
| 345 | return ioremap_nocache(phys_addr, size); |
| 346 | } |
| 347 | EXPORT_SYMBOL(ioremap_wc); |
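
/*
 * Illustrative sketch (not part of the original file): write-combined
 * mappings suit streaming writes such as a framebuffer; "fb_phys" and
 * "fb_len" are hypothetical names:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_len);
 *		iounmap(fb);
 *	}
 */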

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size,
				_PAGE_CACHE_WB, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
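
/*
 * Illustrative sketch (not part of the original file): ioremap_prot()
 * takes raw protection bits but only honours the cache-attribute part
 * (_PAGE_CACHE_MASK). A hypothetical uncached mapping:
 *
 *	void __iomem *p = ioremap_prot(phys, PAGE_SIZE,
 *				       pgprot_val(PAGE_KERNEL_IO_NOCACHE));
 */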

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
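
/*
 * Illustrative sketch (not part of the original file): the /dev/mem read
 * path pairs these helpers around a copy, roughly:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (ptr) {
 *		remaining = copy_to_user(buf, ptr, sz);
 *		unxlate_dev_mem_ptr(p, ptr);
 *	}
 */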

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
		       (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
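
/*
 * Illustrative sketch (not part of the original file): boot code that
 * must read a firmware table before the normal ioremap() machinery is
 * up can use the fixmap-backed slots; "table_phys", "len" and "buf" are
 * hypothetical names:
 *
 *	void __iomem *p = early_ioremap(table_phys, len);
 *
 *	if (p) {
 *		memcpy_fromio(buf, p, len);
 *		early_iounmap(p, len);
 *	}
 */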

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}