/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640KB-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory:
         * this is a BIOS-owned area, not kernel RAM, but it is generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
            pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
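/*
 * As a worked example (the numbers are purely illustrative, not taken from
 * any real device): a request for phys_addr 0xfebc1004 with size 8 becomes a
 * one-page mapping of 0xfebc1000, and the pointer handed back to the caller
 * is that mapped base plus the original offset of 0x4.
 */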
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area; it's always mapped.
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Check if the request spans more than one BAR in the iomem
         * resource tree.
         */
        WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
             (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
             pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                if (!is_new_memtype_allowed(phys_addr, size,
                                            prot_val, new_prot_val)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
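
/*
 * A minimal usage sketch, not part of this file: the PCI device, BAR index
 * and register offset below are illustrative assumptions only.
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      status = readl(regs + 0x10);
 *      ...
 *      iounmap(regs);
 *
 * Every successful ioremap_*() call must be balanced by an iounmap() of the
 * returned pointer.
 */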

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
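
/*
 * Illustrative sketch only (the framebuffer fields below come from struct
 * fb_info and are an assumption about the caller, not something defined
 * here): a driver streaming pixel data might ask for a write-combined
 * mapping,
 *
 *      info->screen_base = ioremap_wc(info->fix.smem_start,
 *                                     info->fix.smem_len);
 *
 * and, as the function above shows, it transparently falls back to an
 * uncached mapping when PAT is not enabled.
 */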

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

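/*
 * A rough usage sketch for the pair above, modelled on a /dev/mem style
 * read path (illustrative only, error handling omitted):
 *
 *      ptr = xlate_dev_mem_ptr(p);
 *      if (copy_to_user(buf, ptr, sz))
 *              err = -EFAULT;
 *      unxlate_dev_mem_ptr(p, ptr);
 *
 * For RAM pages the pointer is simply __va(p) and the unxlate call is a
 * no-op; for anything else a temporary cached ioremap is created and then
 * torn down again.
 */
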
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
        pmd_t *pmd;
        int i;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                        FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_reset(void)
{
        after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           phys_addr_t phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                count);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset;
        resource_size_t last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx) not found slot\n",
                         (u64)phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
                       (u64)phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

        prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
        return prev_map[slot];
}
580
Jeremy Fitzhardinge14941772008-09-07 15:21:15 -0700581/* Remap an IO device */
Masami Hiramatsu9b987ae2009-04-09 10:55:33 -0700582void __init __iomem *
583early_ioremap(resource_size_t phys_addr, unsigned long size)
Jeremy Fitzhardinge14941772008-09-07 15:21:15 -0700584{
585 return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
586}
587
588/* Remap memory */
Masami Hiramatsu9b987ae2009-04-09 10:55:33 -0700589void __init __iomem *
590early_memremap(resource_size_t phys_addr, unsigned long size)
Jeremy Fitzhardinge14941772008-09-07 15:21:15 -0700591{
592 return __early_ioremap(phys_addr, size, PAGE_KERNEL);
593}
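
/*
 * A typical boot-time usage sketch (the firmware table below is purely
 * illustrative; nothing in this file defines it):
 *
 *      hdr = early_memremap(table_phys, sizeof(*hdr));
 *      ... parse the header while the fixmap slot is live ...
 *      early_iounmap(hdr, sizeof(*hdr));
 *
 * Each mapping must be released with early_iounmap(), using the same size,
 * before the FIX_BTMAPS_SLOTS slots run out; leftover mappings are reported
 * by check_early_ioremap_leak() above.
 */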

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
                         addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
                         addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}