/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640K-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */
8
Thomas Gleixnere9332ca2008-01-30 13:34:05 +01009#include <linux/bootmem.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <linux/init.h>
Haavard Skinnemoena148ecf2006-09-30 23:29:17 -070011#include <linux/io.h>
Thomas Gleixner3cbd09e2008-01-30 13:34:05 +010012#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
Pekka Paalanend61fc442008-05-12 21:20:57 +020015#include <linux/mmiotrace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016
Thomas Gleixner3cbd09e2008-01-30 13:34:05 +010017#include <asm/cacheflush.h>
18#include <asm/e820.h>
19#include <asm/fixmap.h>
20#include <asm/pgtable.h>
21#include <asm/tlbflush.h>
Jeremy Fitzhardingef6df72e2008-01-30 13:34:11 +010022#include <asm/pgalloc.h>
venkatesh.pallipadi@intel.comd7677d42008-03-18 17:00:17 -070023#include <asm/pat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
					!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (system_state == SYSTEM_BOOTING ?
				x > MAXMEM : !phys_addr_valid(x)) {
			return false;
		}
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);
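
/*
 * Illustrative sketch (not part of this file): callers normally reach
 * __virt_addr_valid() through the virt_addr_valid() wrapper to sanity
 * check a kernel pointer before converting it to a struct page; the
 * check_ptr() helper here is made up.
 *
 *	static struct page *check_ptr(const void *ptr)
 *	{
 *		if (!virt_addr_valid(ptr))
 *			return NULL;		// not a direct-mapped address
 *		return virt_to_page(ptr);	// pfn known to be valid
 *	}
 */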

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants; not available at boot time */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
			is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640KB->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
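
/*
 * Illustrative sketch (assumed caller): page_is_ram() answers per-pfn
 * questions against the E820 map, e.g. when deciding whether a physical
 * address can be reached through the direct mapping or must go through
 * ioremap():
 *
 *	if (page_is_ram(phys >> PAGE_SHIFT))
 *		vaddr = __va(phys);		// regular RAM, already mapped
 *	else
 *		vaddr = ioremap(phys, size);	// device/reserved memory
 */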

int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_ram_page = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
								++page_nr) {
		if (page_is_ram(page_nr))
			ram_page = 1;
		else
			not_ram_page = 1;

		if (ram_page == not_ram_page)
			return -1;
	}

	return ram_page;
}
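
/*
 * Illustrative sketch (assumed caller, e.g. memory type bookkeeping):
 * the return value is tri-state, so -1 (range straddles RAM and non-RAM)
 * must be handled separately:
 *
 *	switch (pagerange_is_ram(paddr, paddr + size)) {
 *	case 1:		// entirely RAM
 *	case 0:		// entirely non-RAM
 *		break;	// attribute request can proceed
 *	case -1:	// mixed range: reject the request
 *		return -EINVAL;
 *	}
 */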

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
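
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * usual pattern is to map a device BAR uncached, access it with the mmio
 * helpers, and free the mapping with iounmap(). REG_CTRL is a made-up
 * register offset.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	...
 *	iounmap(regs);
 */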

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
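
/*
 * Illustrative sketch (hypothetical caller): write combining suits large
 * streaming writes such as a framebuffer aperture; fb_phys and fb_len are
 * assumed to come from the device's resources.
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_len);	// bulk writes may be combined
 *		iounmap(fb);
 *	}
 */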

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = (void *) __ioremap_caller(phys_addr, size, flags,
					__builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return (void __iomem *)ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
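
/*
 * Illustrative sketch (assumed caller): ioremap_prot() lets generic code
 * reuse the cache attribute of an existing mapping, e.g. when accessing
 * another task's IO mapping through its vma; only the _PAGE_CACHE_MASK
 * bits of the supplied value are used.
 *
 *	maddr = ioremap_prot(phys, PAGE_SIZE,
 *			     pgprot_val(vma->vm_page_prot));
 */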

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller
	 * ensures there isn't another iounmap for the same address
	 * in parallel. Reuse of the virtual address is prevented by
	 * leaving it in the global lists until we're done with it.
	 * cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If the page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
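
/*
 * Illustrative sketch (assumed /dev/mem-style read path): the xlate/unxlate
 * pair brackets each access, so non-RAM pages are only mapped transiently:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */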

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) found no free slot\n",
			 phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
	return prev_map[slot];
}

/* Remap an IO device */
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
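
/*
 * Illustrative sketch (assumed boot-time caller, e.g. firmware table
 * parsing): early mappings only work while system_state == SYSTEM_BOOTING
 * and must be released with early_iounmap() using the same size;
 * table_phys, TABLE_SIZE and parse_table() are made up for the example.
 *
 *	void *tbl = early_memremap(table_phys, TABLE_SIZE);
 *
 *	if (tbl) {
 *		parse_table(tbl);
 *		early_iounmap(tbl, TABLE_SIZE);
 *	}
 */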

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) found no matching slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size does not match mapped size %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}