/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and writebuffer to be turned off.  (Note: the
 * write buffer should not be enabled while the cache is off.)
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attributes).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
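
/*
 * Example: the policy is chosen on the kernel command line, e.g. booting
 * with
 *
 *	cachepolicy=writethrough
 *
 * selects the "writethrough" entry in cache_policies[] above.  On ARMv6
 * and later only writeback is accepted, as enforced above.
 */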

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
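
/*
 * Usage sketch: callers look up the protection bits for a mapping class
 * and must handle the out-of-range case, e.g.
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE);
 *	if (mt)
 *		... use mt->prot_pte / mt->prot_sect ...
 *
 * get_mem_type() returns NULL for an unknown type, not a default entry.
 */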

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * On ARMv5 and lower, bit 4 must be set in page table entries
	 * (was: cache "update-able on write" bit on ARM610).  However,
	 * Xscale and Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}
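
/*
 * The printk above yields a boot log line of the form (exact values
 * depend on the CPU and configuration):
 *
 *	Memory policy: ECC disabled, Data cache writealloc
 */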

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
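
/*
 * phys_mem_access_prot() is consulted when userspace mmap()s a physical
 * range (e.g. via /dev/mem): addresses outside RAM are forced uncached,
 * RAM opened with O_SYNC becomes write-combining, and anything else
 * keeps the protections the caller asked for.
 */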

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd)
{
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
	return pmd_page_vaddr(*pmd);
}

static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
{
	__pmd_populate(pmd, __pa(pte), prot);
	BUG_ON(pmd_bad(*pmd));
}

static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
	unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_pte_alloc(pmd);
		early_pte_install(pmd, pte, prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *start_pte = early_pte_alloc(pmd);
	pte_t *pte = start_pte + pte_index(addr);

	/* If replacing a section mapping, the whole section must be replaced */
	BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));

	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	early_pte_install(pmd, start_pte, type->prot_l1);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type,
				      bool force_pages)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}
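
/*
 * Worked example (illustrative addresses): mapping SZ_1M from phys
 * 0x80100000 to virt 0xc0100000 passes the alignment test above and is
 * written as section entries; the same start with length SZ_4K fails it
 * and falls back to alloc_init_pte(), which builds an L2 table instead.
 */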

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type,
	bool force_pages)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type, force_pages);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md, bool force_pages)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type, force_pages);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}
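
/*
 * Minimal usage sketch (hypothetical addresses): map one page of a
 * device into the fixed kernel virtual address 0xf8000000:
 *
 *	struct map_desc map;
 *
 *	map.pfn     = __phys_to_pfn(0x10000000);
 *	map.virtual = 0xf8000000;
 *	map.length  = SZ_4K;
 *	map.type    = MT_DEVICE;
 *	create_mapping(&map, false);
 *
 * Passing force_pages == true refuses section mappings even for aligned
 * regions; the CONFIG_DEBUG_RODATA path in map_lowmem() below relies on
 * this to get page-granular mappings over the kernel's rodata.
 */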

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md, false);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
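
/*
 * iotable_init() is normally fed a static table from a machine port's
 * ->map_io() callback, e.g. (a sketch with made-up addresses):
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xfe000000,
 *			.pfn		= __phys_to_pfn(0x40000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 */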

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
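
/*
 * Example: booting with "vmalloc=256M" asks for a 256MB vmalloc area;
 * memparse() accepts the usual K/M/G suffixes.  Requests below 16MB, or
 * so large that less than 32MB of lowmem would remain, are clamped by
 * the checks above.
 */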

static phys_addr_t lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
			lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(lowmem_limit - 1) + 1;
	memblock_set_current_limit(lowmem_limit);
}
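
/*
 * Worked example (illustrative values, assuming PAGE_OFFSET 0xc0000000
 * and the default vmalloc_min of 0xef800000): a single 1GB bank at
 * physical 0x80000000 would map to 0xc0000000-0xffffffff and overlap
 * the vmalloc area, so the loop above splits it into a 0x2f800000-byte
 * lowmem bank and a highmem bank covering the remainder.
 */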

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
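
/*
 * Size check (assuming the usual configuration values): on classic ARM,
 * PTRS_PER_PGD is 2048 and a pgd_t spans two 4-byte L1 entries, so
 * SWAPPER_PG_DIR_SIZE is 2048 * 8 = 16KB.  With LPAE it is one page for
 * the four-entry top-level pgd plus 4 * 512 * sizeof(pmd_t) = 16KB of
 * pmd tables.
 */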

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map, false);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map, false);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map, false);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map, false);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map, false);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;
	phys_addr_t start;
	phys_addr_t end;
	struct map_desc map;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map, false);
	}

#ifdef CONFIG_DEBUG_RODATA
	start = __pa(_stext) & PMD_MASK;
	end = ALIGN(__pa(__end_rodata), PMD_SIZE);

	map.pfn = __phys_to_pfn(start);
	map.virtual = __phys_to_virt(start);
	map.length = end - start;
	map.type = MT_MEMORY;

	create_mapping(&map, true);
#endif
}
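
/*
 * Note on the CONFIG_DEBUG_RODATA block above: forcing page rather than
 * section mappings over _stext..__end_rodata gives every page its own
 * L2 entry, so permissions on the kernel's read-only data can later be
 * tightened at page granularity (the permission changes themselves
 * happen elsewhere).
 */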

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}
Russell Kingd111e8f2006-09-27 15:27:33 +01001171}